-rw-r--r--  compass-deck/Dockerfile | 9
-rw-r--r--  compass-deck/README.md | 2
-rw-r--r--  compass-deck/actions/__init__.py | 13
-rw-r--r--  compass-deck/actions/clean.py | 192
-rw-r--r--  compass-deck/actions/cli.py | 179
-rw-r--r--  compass-deck/actions/install_callback.py | 181
-rw-r--r--  compass-deck/actions/poll_switch.py | 162
-rw-r--r--  compass-deck/actions/update_progress.py | 298
-rw-r--r--  compass-deck/actions/util.py | 342
-rw-r--r--  compass-deck/api/__init__.py | 42
-rw-r--r--  compass-deck/api/api. | 0
-rw-r--r--  compass-deck/api/api.py | 3391
-rw-r--r--  compass-deck/api/api.raml | 4027
-rw-r--r--  compass-deck/api/auth_handler.py | 49
-rw-r--r--  compass-deck/api/exception_handler.py | 92
-rw-r--r--  compass-deck/api/utils.py | 35
-rw-r--r--  compass-deck/api/v1/__init__.py | 0
-rw-r--r--  compass-deck/api/v1/api.py | 248
-rw-r--r--  compass-deck/apiclient/__init__.py | 0
-rwxr-xr-x  compass-deck/apiclient/example.py | 463
-rw-r--r--  compass-deck/apiclient/restful.py | 1102
-rw-r--r--  compass-deck/apiclient/v1/__init__.py | 0
-rwxr-xr-x  compass-deck/apiclient/v1/example.py | 305
-rw-r--r--  compass-deck/apiclient/v1/restful.py | 655
-rw-r--r--  compass-deck/bin/README.md | 66
-rwxr-xr-x  compass-deck/bin/ansible_callbacks/playbook_done.py | 96
-rwxr-xr-x  compass-deck/bin/chef/addcookbooks.py | 54
-rwxr-xr-x  compass-deck/bin/chef/adddatabags.py | 71
-rwxr-xr-x  compass-deck/bin/chef/addroles.py | 62
-rwxr-xr-x  compass-deck/bin/chef/clean_clients.sh | 6
-rwxr-xr-x  compass-deck/bin/chef/clean_environments.sh | 13
-rwxr-xr-x  compass-deck/bin/chef/clean_nodes.sh | 6
-rwxr-xr-x  compass-deck/bin/clean_installation_logs.py | 50
-rwxr-xr-x  compass-deck/bin/clean_installers.py | 163
-rwxr-xr-x  compass-deck/bin/client.py | 1006
-rwxr-xr-x  compass-deck/bin/client.sh | 2
-rwxr-xr-x  compass-deck/bin/cobbler/remove_systems.sh | 9
-rwxr-xr-x  compass-deck/bin/compass_check.py | 31
-rwxr-xr-x  compass-deck/bin/compass_wsgi.py | 42
-rwxr-xr-x  compass-deck/bin/compassd | 43
-rwxr-xr-x  compass-deck/bin/csvdeploy.py | 333
-rwxr-xr-x  compass-deck/bin/delete_clusters.py | 73
-rwxr-xr-x  compass-deck/bin/manage_db.py | 165
-rwxr-xr-x  compass-deck/bin/poll_switch.py | 113
-rwxr-xr-x  compass-deck/bin/progress_update.py | 72
-rwxr-xr-x  compass-deck/bin/query_switch.py | 143
-rwxr-xr-x  compass-deck/bin/refresh.sh | 3
-rwxr-xr-x  compass-deck/bin/refresh_agent.sh | 22
-rwxr-xr-x  compass-deck/bin/refresh_server.sh | 22
-rwxr-xr-x  compass-deck/bin/runserver.py | 37
-rwxr-xr-x  compass-deck/bin/switch_virtualenv.py | 30
-rwxr-xr-x  compass-deck/build.sh | 66
-rw-r--r--  compass-deck/db/__init__.py | 13
-rw-r--r--  compass-deck/db/api/__init__.py | 13
-rw-r--r--  compass-deck/db/api/adapter.py | 313
-rw-r--r--  compass-deck/db/api/adapter_holder.py | 155
-rw-r--r--  compass-deck/db/api/cluster.py | 2444
-rw-r--r--  compass-deck/db/api/database.py | 264
-rw-r--r--  compass-deck/db/api/health_check_report.py | 190
-rw-r--r--  compass-deck/db/api/host.py | 1120
-rw-r--r--  compass-deck/db/api/machine.py | 317
-rw-r--r--  compass-deck/db/api/metadata.py | 517
-rw-r--r--  compass-deck/db/api/metadata_holder.py | 731
-rw-r--r--  compass-deck/db/api/network.py | 160
-rw-r--r--  compass-deck/db/api/permission.py | 357
-rw-r--r--  compass-deck/db/api/switch.py | 1213
-rw-r--r--  compass-deck/db/api/user.py | 553
-rw-r--r--  compass-deck/db/api/user_log.py | 82
-rw-r--r--  compass-deck/db/api/utils.py | 1286
-rw-r--r--  compass-deck/db/callback.py | 204
-rw-r--r--  compass-deck/db/config_validation/__init__.py | 0
-rw-r--r--  compass-deck/db/config_validation/default_validator.py | 131
-rw-r--r--  compass-deck/db/config_validation/extension/__init__.py | 0
-rw-r--r--  compass-deck/db/config_validation/extension/openstack.py | 18
-rw-r--r--  compass-deck/db/exception.py | 116
-rw-r--r--  compass-deck/db/models.py | 1924
-rw-r--r--  compass-deck/db/v1/model.py | 724
-rw-r--r--  compass-deck/db/validator.py | 195
-rw-r--r--  compass-deck/deployment/__init__.py | 15
-rw-r--r--  compass-deck/deployment/deploy_manager.py | 237
-rw-r--r--  compass-deck/deployment/installers/__init__.py | 21
-rw-r--r--  compass-deck/deployment/installers/config_manager.py | 527
-rw-r--r--  compass-deck/deployment/installers/installer.py | 291
-rw-r--r--  compass-deck/deployment/installers/os_installers/__init__.py | 13
-rw-r--r--  compass-deck/deployment/installers/os_installers/cobbler/__init__.py | 13
-rw-r--r--  compass-deck/deployment/installers/os_installers/cobbler/cobbler.py | 449
-rw-r--r--  compass-deck/deployment/installers/pk_installers/__init__.py | 13
-rw-r--r--  compass-deck/deployment/installers/pk_installers/ansible_installer/__init__.py | 0
-rw-r--r--  compass-deck/deployment/installers/pk_installers/ansible_installer/ansible_installer.py | 401
-rw-r--r--  compass-deck/deployment/utils/__init__.py | 15
-rw-r--r--  compass-deck/deployment/utils/constants.py | 84
-rw-r--r--  compass-deck/misc/Dockerfile | 86
-rw-r--r--  compass-deck/misc/adapter_changes/Debian.yml | 18
-rw-r--r--  compass-deck/misc/adapter_changes/HA-ansible-multinodes.yml | 239
-rw-r--r--  compass-deck/misc/adapter_changes/keystone_install.yml | 74
-rw-r--r--  compass-deck/misc/adapter_changes/preseed_post_anamon_local | 80
-rw-r--r--  compass-deck/misc/adapter_changes/preseed_post_anamon_remote | 80
-rw-r--r--  compass-deck/misc/apache/README | 15
-rw-r--r--  compass-deck/misc/apache/cobbler_web.conf | 10
-rw-r--r--  compass-deck/misc/apache/http_pip.conf | 9
-rw-r--r--  compass-deck/misc/apache/images.conf | 9
-rw-r--r--  compass-deck/misc/apache/ods-server.conf | 18
-rw-r--r--  compass-deck/misc/apache/packages.conf | 9
-rw-r--r--  compass-deck/misc/apache/ssl.conf | 221
-rw-r--r--  compass-deck/misc/chef-server/chef-server.rb | 4
-rwxr-xr-x  compass-deck/misc/ci/prepare_node_compass.sh | 28
-rwxr-xr-x  compass-deck/misc/ci/pxe-deploy.sh | 14
-rwxr-xr-x  compass-deck/misc/ci/pxe-prepare.sh | 29
-rwxr-xr-x  compass-deck/misc/ci/tempest_run.sh | 81
-rwxr-xr-x  compass-deck/misc/ci/test-install.sh | 22
-rw-r--r--  compass-deck/misc/compass_install.repo | 5
-rw-r--r--  compass-deck/misc/hosts | 3
-rw-r--r--  compass-deck/misc/logrotate.d/httpd | 9
-rw-r--r--  compass-deck/misc/logrotate.d/ntp | 9
-rw-r--r--  compass-deck/misc/logrotate.d/squid | 9
-rw-r--r--  compass-deck/misc/logrotate.d/syslog | 13
-rw-r--r--  compass-deck/misc/logrotate.d/yum | 7
-rw-r--r--  compass-deck/misc/logstash-forwarder/logstash-forwarder.conf | 57
-rw-r--r--  compass-deck/misc/logstash-forwarder/logstash-forwarder.crt | 29
-rw-r--r--  compass-deck/misc/logstash-forwarder/logstash-forwarder.repo | 6
-rw-r--r--  compass-deck/misc/ntp/ntp.conf | 60
-rw-r--r--  compass-deck/misc/rsync | 14
-rw-r--r--  compass-deck/misc/rsyslog/rsyslog.conf | 97
-rw-r--r--  compass-deck/misc/snmp/snmp.conf | 1
-rw-r--r--  compass-deck/misc/squid/squid.conf | 71
-rw-r--r--  compass-deck/requirements.txt | 24
-rw-r--r--  compass-deck/setup.py | 98
-rwxr-xr-x  compass-deck/start.sh | 7
-rw-r--r--  compass-deck/tasks/__init__.py | 13
-rw-r--r--  compass-deck/tasks/client.py | 33
-rw-r--r--  compass-deck/tasks/tasks.py | 326
-rw-r--r--  compass-deck/utils/__init__.py | 13
-rw-r--r--  compass-deck/utils/celeryconfig_wrapper.py | 44
-rw-r--r--  compass-deck/utils/daemonize.py | 76
-rw-r--r--  compass-deck/utils/flags.py | 91
-rw-r--r--  compass-deck/utils/logsetting.py | 108
-rw-r--r--  compass-deck/utils/setting_wrapper.py | 175
-rw-r--r--  compass-deck/utils/util.py | 395
138 files changed, 32544 insertions, 0 deletions
diff --git a/compass-deck/Dockerfile b/compass-deck/Dockerfile
new file mode 100644
index 0000000..764e7f1
--- /dev/null
+++ b/compass-deck/Dockerfile
@@ -0,0 +1,9 @@
+FROM huangxiangyu/centos-systemd
+
+ADD . /root/compass-deck
+
+RUN /root/compass-deck/build.sh
+
+EXPOSE 80
+
+CMD ["/sbin/init", "/usr/local/bin/start.sh"]
diff --git a/compass-deck/README.md b/compass-deck/README.md
new file mode 100644
index 0000000..c17dbe1
--- /dev/null
+++ b/compass-deck/README.md
@@ -0,0 +1,2 @@
+# compass-deck
+RESTful API for Compass
diff --git a/compass-deck/actions/__init__.py b/compass-deck/actions/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-deck/actions/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/actions/clean.py b/compass-deck/actions/clean.py
new file mode 100644
index 0000000..8cb00b5
--- /dev/null
+++ b/compass-deck/actions/clean.py
@@ -0,0 +1,192 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to clean installers
+"""
+import chef
+import logging
+import xmlrpclib
+
+from compass.actions import util
+
+
+class CobblerInstaller(object):
+ """cobbler installer"""
+ CREDENTIALS = "credentials"
+ USERNAME = 'username'
+ PASSWORD = 'password'
+
+ INSTALLER_URL = "cobbler_url"
+
+ def __init__(self, settings):
+ username = settings[self.CREDENTIALS][self.USERNAME]
+ password = settings[self.CREDENTIALS][self.PASSWORD]
+ cobbler_url = settings[self.INSTALLER_URL]
+ try:
+ self.remote = xmlrpclib.Server(cobbler_url)
+ self.token = self.remote.login(username, password)
+ logging.info('cobbler %s client created', cobbler_url)
+ except Exception as error:
+ logging.error(
+ 'failed to login %s with (%s, %s)',
+ cobbler_url, username, password
+ )
+ logging.exception(error)
+
+ def clean(self):
+ systems = self.remote.get_systems()
+ for system in systems:
+ system_name = system['name']
+ try:
+ self.remote.remove_system(system_name, self.token)
+ logging.info('system %s is removed', system_name)
+ except Exception as error:
+ logging.error(
+ 'failed to remove system %s', system_name
+ )
+ logging.exception(error)
+
+
+class AnsibleInstaller(object):
+
+ def __init__(self, settings):
+ return
+
+ def clean(self):
+ pass
+
+
+class ChefInstaller(object):
+ DATABAGS = "databags"
+ CHEFSERVER_URL = "chef_url"
+ CHEFSERVER_DNS = "chef_server_dns"
+ CHEFSERVER_IP = "chef_server_ip"
+ KEY_DIR = "key_dir"
+ CLIENT = "client_name"
+
+ def __init__(self, settings):
+ installer_url = settings.get(self.CHEFSERVER_URL, None)
+ key_dir = settings.get(self.KEY_DIR, None)
+ client = settings.get(self.CLIENT, None)
+ try:
+ if installer_url and key_dir and client:
+ self.api = chef.ChefAPI(installer_url, key_dir, client)
+ else:
+ self.api = chef.autoconfigure()
+ logging.info(
+ 'chef client created %s(%s, %s)',
+ installer_url, key_dir, client
+ )
+ except Exception as error:
+ logging.error(
+ 'failed to create chef client %s(%s, %s)',
+ installer_url, key_dir, client
+ )
+ logging.exception(error)
+
+ def clean(self):
+ try:
+ for node_name in chef.Node.list(api=self.api):
+ node = chef.Node(node_name, api=self.api)
+ node.delete()
+ logging.info('delete node %s', node_name)
+ except Exception as error:
+ logging.error('failed to delete some nodes')
+ logging.exception(error)
+
+ try:
+ for client_name in chef.Client.list(api=self.api):
+ if client_name in ['chef-webui', 'chef-validator']:
+ continue
+ client = chef.Client(client_name, api=self.api)
+ client.delete()
+ logging.info('delete client %s', client_name)
+ except Exception as error:
+ logging.error('failed to delete some clients')
+ logging.exception(error)
+
+ try:
+ for env_name in chef.Environment.list(api=self.api):
+ if env_name == '_default':
+ continue
+ env = chef.Environment(env_name, api=self.api)
+ env.delete()
+ logging.info('delete env %s', env_name)
+ except Exception as error:
+ logging.error('failed to delete some envs')
+ logging.exception(error)
+
+ try:
+ for databag_name in chef.DataBag.list(api=self.api):
+ databag = chef.DataBag(databag_name, api=self.api)
+ for item_name, item in databag.items():
+ item.delete()
+ logging.info(
+ 'delete item %s from databag %s',
+ item_name, databag_name
+ )
+ except Exception as error:
+ logging.error('failed to delete some databag items')
+ logging.exception(error)
+
+
+OS_INSTALLERS = {
+ 'cobbler': CobblerInstaller
+}
+PK_INSTALLERS = {
+ 'chef_installer': ChefInstaller,
+ 'ansible_installer': AnsibleInstaller
+}
+
+
+def clean_os_installer(
+ os_installer_name, os_installer_settings
+):
+ with util.lock('serialized_action', timeout=100) as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to clean os installer'
+ )
+
+ if os_installer_name not in OS_INSTALLERS:
+ logging.error(
+ '%s not found in os_installers',
+ os_installer_name
+ )
+
+ os_installer = OS_INSTALLERS[os_installer_name](
+ os_installer_settings
+ )
+ os_installer.clean()
+
+
+def clean_package_installer(
+ package_installer_name, package_installer_settings
+):
+ with util.lock('serialized_action', timeout=100) as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to clean package installer'
+ )
+
+ if package_installer_name not in PK_INSTALLERS:
+ logging.error(
+            '%s not found in package_installers',
+ package_installer_name
+ )
+
+ package_installer = PK_INSTALLERS[package_installer_name](
+ package_installer_settings
+ )
+ package_installer.clean()
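
The two entry points above share the same lock-guarded shape; a minimal sketch of driving them directly, assuming a cobbler settings dict with the same 'credentials' and 'cobbler_url' keys CobblerInstaller reads (the URL and login values here are illustrative):

    from compass.actions import clean

    # illustrative settings; keys mirror what CobblerInstaller.__init__ reads
    cobbler_settings = {
        'credentials': {'username': 'cobbler', 'password': 'cobbler'},
        'cobbler_url': 'http://127.0.0.1/cobbler_api',
    }
    clean.clean_os_installer('cobbler', cobbler_settings)

    # an empty settings dict makes ChefInstaller fall back to
    # chef.autoconfigure()
    clean.clean_package_installer('chef_installer', {})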
diff --git a/compass-deck/actions/cli.py b/compass-deck/actions/cli.py
new file mode 100644
index 0000000..c9058ed
--- /dev/null
+++ b/compass-deck/actions/cli.py
@@ -0,0 +1,179 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass Command Line Interface"""
+import logging
+import subprocess
+import sys
+
+from compass.actions.health_check import check
+from compass.db.api import database
+
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils.util import pretty_print
+
+
+ACTION_MAP = {
+ "check": "apache celery dhcp dns hds misc os_installer "
+ "package_installer squid tftp".split(" "),
+ "refresh": "db sync".split(" "),
+}
+
+
+class BootCLI(object):
+ """CLI to do compass check."""
+
+ def __init__(self):
+ return
+
+ def run(self, args):
+ """cli takes the commands and calls respective modules."""
+ action = self.get_action(args)
+ if action is None:
+ self.print_help()
+ else:
+ module = self.get_module(action, args)
+ if module == "invalid":
+ self.print_help(action)
+ else:
+ method = "self.run_" + action + "(module)"
+ eval(method)
+
+ @classmethod
+ def get_action(cls, args):
+ """This method returns an action type.
+
+ .. note::
+ For 'compass check dhcp' command, it will return 'check'.
+ """
+ if len(args) == 1:
+ return None
+ elif args[1] in ACTION_MAP.keys():
+ return args[1]
+ return None
+
+ @classmethod
+ def get_module(cls, action, args):
+ """This method returns a module.
+
+ .. note::
+ For 'compass check dhcp' command, it will return 'dhcp'.
+ """
+ if len(args) <= 2:
+ return None
+ elif args[2] in ACTION_MAP[action]:
+ return args[2]
+ return "invalid"
+
+ def run_check(self, module=None):
+ """This provides a flexible sanity check.
+
+ .. note::
+            param module defaults to None.
+            If module is None, Compass checks all modules.
+            If a module is specified, Compass only checks that module.
+ """
+ if module is None:
+ pretty_print("Starting: Compass Health Check",
+ "==============================")
+ chk = check.BootCheck()
+ res = chk.run()
+ self.output_check_result(res)
+
+ else:
+ pretty_print("Checking Module: %s" % module,
+ "============================")
+ chk = check.BootCheck()
+ method = "chk._check_" + module + "()"
+ res = eval(method)
+ print "\n".join(msg for msg in res[1])
+
+ @classmethod
+ def output_check_result(cls, result):
+ """output check result."""
+ if result == {}:
+ return
+ pretty_print("\n",
+ "===============================",
+ "* Compass Health Check Report *",
+ "===============================")
+ successful = True
+ for key in result.keys():
+ if result[key][0] == 0:
+ successful = False
+ print "%s" % "\n".join(item for item in result[key][1])
+
+ print "===================="
+ if successful is True:
+ print "Compass Check completes. No problems found, all systems go"
+ sys.exit(0)
+ else:
+ print (
+ "Compass has ERRORS shown above. Please fix them before "
+ "deploying!")
+ sys.exit(1)
+
+ @classmethod
+ def run_refresh(cls, action=None):
+ """Run refresh."""
+ # TODO(xicheng): replace refresh.sh with refresh.py
+ if action is None:
+ pretty_print("Refreshing Compass...",
+ "=================")
+ subprocess.Popen(
+ ['/opt/compass/bin/refresh.sh'], shell=True)
+ elif action == "db":
+ pretty_print("Refreshing Compass Database...",
+ "===================")
+ subprocess.Popen(
+ ['/opt/compass/bin/manage_db.py createdb'], shell=True)
+ else:
+ pretty_print("Syncing with Installers...",
+ "================")
+ subprocess.Popen(
+ ['/opt/compass/bin/manage_db.py sync_from_installers'],
+ shell=True
+ )
+
+ @classmethod
+ def print_help(cls, module_help=""):
+ """print help."""
+ if module_help == "":
+ pretty_print("usage\n=====",
+ "compass <refresh|check>",
+ "type 'compass {action} --help' for detailed "
+ "command list")
+
+ elif module_help == "refresh":
+ pretty_print("usage\n=====",
+ "compass refresh [%s]" %
+ "|".join(action for action in ACTION_MAP['refresh']))
+
+ else:
+ pretty_print("usage\n=====",
+ "compass check [%s]" %
+ "|".join(action for action in ACTION_MAP['check']))
+ sys.exit(2)
+
+
+def main():
+ """Compass cli entry point."""
+ flags.init()
+ logsetting.init()
+ database.init()
+ cli = BootCLI()
+ output = cli.run(sys.argv)
+ return sys.exit(output)
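
run() dispatches on argv[1] (the action) and argv[2] (the module), so the CLI can be exercised without the shell wrapper; a sketch equivalent to running 'compass check dhcp', with the same init calls main() performs:

    from compass.actions.cli import BootCLI
    from compass.db.api import database
    from compass.utils import flags
    from compass.utils import logsetting

    flags.init()
    logsetting.init()
    database.init()
    # equivalent to 'compass check dhcp'; with no module argument,
    # run_check() walks every entry in ACTION_MAP['check']
    BootCLI().run(['compass', 'check', 'dhcp'])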
diff --git a/compass-deck/actions/install_callback.py b/compass-deck/actions/install_callback.py
new file mode 100644
index 0000000..aae955a
--- /dev/null
+++ b/compass-deck/actions/install_callback.py
@@ -0,0 +1,181 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to receive installation callback.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import cluster as cluster_api
+from compass.db.api import host as host_api
+from compass.db.api import user as user_db
+from compass.deployment.deploy_manager import DeployManager
+from compass.deployment.utils import constants as const
+
+
+def os_installed(
+ host_id, clusterhosts_ready, clusters_os_ready,
+ username=None
+):
+ """Callback when os is installed.
+
+ :param host_id: host that os is installed.
+ :type host_id: integer
+ :param clusterhosts_ready: the clusterhosts that should trigger ready.
+ :param clusters_os_ready: the cluster that should trigger os ready.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to '
+ 'do the post action after os installation'
+ )
+ logging.info(
+ 'os installed on host %s '
+ 'with cluster host ready %s cluster os ready %s',
+ host_id, clusterhosts_ready, clusters_os_ready
+ )
+ if username:
+ user = user_db.get_user_object(username)
+ else:
+ user = None
+ os_installed_triggered = False
+ for cluster_id, clusterhost_ready in clusterhosts_ready.items():
+ if not clusterhost_ready and os_installed_triggered:
+ continue
+
+ cluster_info = util.ActionHelper.get_cluster_info(
+ cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, [host_id], user)
+
+ deploy_manager = DeployManager(
+ adapter_info, cluster_info, hosts_info)
+
+ if not os_installed_triggered:
+ deploy_manager.os_installed()
+ util.ActionHelper.host_ready(host_id, True, user)
+ os_installed_triggered = True
+
+ if clusterhost_ready:
+ # deploy_manager.cluster_os_installed()
+ util.ActionHelper.cluster_host_ready(
+ cluster_id, host_id, False, user
+ )
+
+ if util.ActionHelper.is_cluster_os_ready(cluster_id, user):
+ logging.info("deploy_manager begin cluster_os_installed")
+ deploy_manager.cluster_os_installed()
+
+
+def package_installed(
+ cluster_id, host_id, cluster_ready,
+ host_ready, username=None
+):
+ """Callback when package is installed.
+
+ :param cluster_id: cluster id.
+ :param host_id: host id.
+ :param cluster_ready: if the cluster should trigger ready.
+ :param host_ready: if the host should trigger ready.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to '
+ 'do the post action after package installation'
+ )
+ logging.info(
+ 'package installed on cluster %s host %s '
+ 'with cluster ready %s host ready %s',
+ cluster_id, host_id, cluster_ready, host_ready
+ )
+
+ if username:
+ user = user_db.get_user_object(username)
+ else:
+ user = None
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, [host_id], user)
+
+ deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
+
+ deploy_manager.package_installed()
+ util.ActionHelper.cluster_host_ready(cluster_id, host_id, True, user)
+ if cluster_ready:
+ util.ActionHelper.cluster_ready(cluster_id, False, user)
+ if host_ready:
+ util.ActionHelper.host_ready(host_id, False, user)
+
+
+def cluster_installed(
+ cluster_id, clusterhosts_ready,
+ username=None
+):
+ """Callback when cluster is installed.
+
+ :param cluster_id: cluster id
+ :param clusterhosts_ready: clusterhosts that should trigger ready.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to '
+ 'do the post action after cluster installation'
+ )
+ logging.info(
+ 'package installed on cluster %s with clusterhosts ready %s',
+ cluster_id, clusterhosts_ready
+ )
+ if username:
+ user = user_db.get_user_object(username)
+ else:
+ user = None
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, clusterhosts_ready.keys(), user)
+
+ deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
+
+ deploy_manager.cluster_installed()
+ util.ActionHelper.cluster_ready(cluster_id, True, user)
+ for host_id, clusterhost_ready in clusterhosts_ready.items():
+ if clusterhost_ready:
+ util.ActionHelper.cluster_host_ready(
+ cluster_id, host_id, False, user
+ )
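
A sketch of reporting installation results through these callbacks, assuming host 1 belongs to cluster 1; the ids and username are illustrative:

    from compass.actions import install_callback

    # clusterhosts_ready: cluster id -> whether the clusterhost should
    # trigger ready; clusters_os_ready: cluster id -> whether the cluster
    # OS should trigger ready (see the os_installed docstring above)
    install_callback.os_installed(
        host_id=1,
        clusterhosts_ready={1: True},
        clusters_os_ready={1: True},
        username='admin@huawei.com'
    )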
diff --git a/compass-deck/actions/poll_switch.py b/compass-deck/actions/poll_switch.py
new file mode 100644
index 0000000..5c29b01
--- /dev/null
+++ b/compass-deck/actions/poll_switch.py
@@ -0,0 +1,162 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provider function to poll switch."""
+import logging
+import netaddr
+
+from compass.actions import util
+from compass.db.api import database
+from compass.db.api import switch as switch_api
+from compass.db.api import user as user_api
+from compass.hdsdiscovery.hdmanager import HDManager
+
+
+def _poll_switch(ip_addr, credentials, req_obj='mac', oper="SCAN"):
+ """Poll switch by ip addr.
+
+ Args:
+ ip_addr: ip addr of the switch.
+ credentials: credentials of the switch.
+
+ Returns: switch attributes dict and list of machine attributes dict.
+ """
+ under_monitoring = 'under_monitoring'
+ unreachable = 'unreachable'
+ polling_error = 'error'
+ hdmanager = HDManager()
+ vendor, state, err_msg = hdmanager.get_vendor(ip_addr, credentials)
+ if not vendor:
+ logging.info("*****error_msg: %s****", err_msg)
+        logging.error('no vendor found or matched for switch %s', ip_addr)
+ return (
+ {
+ 'vendor': vendor, 'state': state, 'err_msg': err_msg
+ }, {
+ }
+ )
+
+ logging.debug(
+ 'hdmanager learn switch from %s', ip_addr
+ )
+ results = []
+ try:
+ results = hdmanager.learn(
+ ip_addr, credentials, vendor, req_obj, oper
+ )
+ except Exception as error:
+ logging.exception(error)
+ state = unreachable
+ err_msg = (
+            'SNMP walk for querying MAC addresses timed out'
+ )
+ return (
+ {
+ 'vendor': vendor, 'state': state, 'err_msg': err_msg
+ }, {
+ }
+ )
+
+ logging.info("pollswitch %s result: %s", ip_addr, results)
+ if not results:
+ logging.error(
+ 'no result learned from %s', ip_addr
+ )
+ state = polling_error
+ err_msg = 'No result learned from SNMP walk'
+ return (
+ {'vendor': vendor, 'state': state, 'err_msg': err_msg},
+ {}
+ )
+
+ logging.info('poll switch result: %s' % str(results))
+ machine_dicts = {}
+ for machine in results:
+ mac = machine['mac']
+ port = machine['port']
+ vlan = int(machine['vlan'])
+ if vlan:
+ vlans = [vlan]
+ else:
+ vlans = []
+ if mac not in machine_dicts:
+ machine_dicts[mac] = {'mac': mac, 'port': port, 'vlans': vlans}
+ else:
+ machine_dicts[mac]['port'] = port
+ machine_dicts[mac]['vlans'].extend(vlans)
+
+ logging.debug('update switch %s state to under monitoring', ip_addr)
+ state = under_monitoring
+ return (
+ {'vendor': vendor, 'state': state, 'err_msg': err_msg},
+ machine_dicts.values()
+ )
+
+
+def poll_switch(poller_email, ip_addr, credentials,
+ req_obj='mac', oper="SCAN"):
+ """Query switch and update switch machines.
+
+ .. note::
+       When polling the switch succeeds, a Machine record associated with
+       the switch is added to the database for each mac learned.
+
+ :param ip_addr: switch ip address.
+ :type ip_addr: str
+    :param credentials: switch credentials.
+ :type credentials: dict
+ :param req_obj: the object requested to query from switch.
+ :type req_obj: str
+ :param oper: the operation to query the switch.
+ :type oper: str, should be one of ['SCAN', 'GET', 'SET']
+
+ .. note::
+ The function should be called out of database session scope.
+ """
+ poller = user_api.get_user_object(poller_email)
+ ip_int = long(netaddr.IPAddress(ip_addr))
+ with util.lock('poll switch %s' % ip_addr, timeout=120) as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to poll switch %s' % ip_addr
+ )
+
+        # TODO(grace): before repolling the switch, set the state to
+        # repolling, and when the switch poll times out, set the state to
+        # error. The frontend should only consider the main states like
+        # INITIALIZED, ERROR and SUCCESSFUL; REPOLLING is an intermediate
+        # state to indicate the switch is learning the macs of the machines
+        # connected to it.
+ logging.debug('poll switch: %s', ip_addr)
+ switch_dict, machine_dicts = _poll_switch(
+ ip_addr, credentials, req_obj=req_obj, oper=oper
+ )
+ switches = switch_api.list_switches(ip_int=ip_int, user=poller)
+ if not switches:
+ logging.error('no switch found for %s', ip_addr)
+ return
+
+ for switch in switches:
+ for machine_dict in machine_dicts:
+ logging.info('add machine: %s', machine_dict)
+ machine_dict['owner_id'] = poller.id
+ switch_api.add_switch_machine(
+ switch['id'], False, user=poller, **machine_dict
+ )
+ switch_api.update_switch(
+ switch['id'],
+ user=poller,
+ **switch_dict
+ )
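
poll_switch is normally fired from a celery task, but it can be driven directly; a sketch assuming SNMP v2c credentials in the dict layout the hdsdiscovery plugins expect (community string, switch ip and poller email are illustrative):

    from compass.actions import poll_switch

    # illustrative credentials; the exact keys depend on the
    # hdsdiscovery vendor plugin for the switch
    credentials = {'version': '2c', 'community': 'public'}
    poll_switch.poll_switch(
        'admin@huawei.com', '10.145.8.1', credentials,
        req_obj='mac', oper='SCAN'
    )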
diff --git a/compass-deck/actions/update_progress.py b/compass-deck/actions/update_progress.py
new file mode 100644
index 0000000..67a9963
--- /dev/null
+++ b/compass-deck/actions/update_progress.py
@@ -0,0 +1,298 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to update status and installing progress of the given cluster.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import cluster as cluster_api
+from compass.db.api import host as host_api
+from compass.db.api import user as user_api
+from compass.log_analyzor import progress_calculator
+from compass.utils import setting_wrapper as setting
+
+
+def update_progress():
+ """Update status and installing progress of the given cluster.
+
+ :param cluster_hosts: clusters and hosts in each cluster to update.
+ :type cluster_hosts: dict of int or str to list of int or str
+
+ .. note::
+ The function should be called out of the database session scope.
+ In the function, it will update the database cluster_state and
+ host_state table for the deploying cluster and hosts.
+
+ The function will also query log_progressing_history table to get
+ the lastest installing progress and the position of log it has
+ processed in the last run. The function uses these information to
+ avoid recalculate the progress from the beginning of the log file.
+ After the progress got updated, these information will be stored back
+ to the log_progressing_history for next time run.
+ """
+ with util.lock('log_progressing', timeout=60, blocking=False) as lock:
+ if not lock:
+ logging.error(
+ 'failed to acquire lock to calculate installation progress'
+ )
+ return
+
+ logging.info('update installing progress')
+
+ user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
+ hosts = host_api.list_hosts(user=user)
+ host_mapping = {}
+ for host in hosts:
+ if 'id' not in host:
+ logging.error('id is not in host %s', host)
+ continue
+ host_id = host['id']
+ if 'os_name' not in host:
+ logging.error('os_name is not in host %s', host)
+ continue
+ if 'os_installer' not in host:
+ logging.error('os_installer is not in host %s', host)
+ continue
+ host_dirname = setting.HOST_INSTALLATION_LOGDIR_NAME
+ if host_dirname not in host:
+ logging.error(
+ '%s is not in host %s', host_dirname, host
+ )
+ continue
+ host_state = host_api.get_host_state(host_id, user=user)
+ if 'state' not in host_state:
+ logging.error('state is not in host state %s', host_state)
+ continue
+ if host_state['state'] == 'INSTALLING':
+ host_log_histories = host_api.get_host_log_histories(
+ host_id, user=user
+ )
+ host_log_history_mapping = {}
+ for host_log_history in host_log_histories:
+ if 'filename' not in host_log_history:
+ logging.error(
+ 'filename is not in host log history %s',
+ host_log_history
+ )
+ continue
+ host_log_history_mapping[
+ host_log_history['filename']
+ ] = host_log_history
+ host_mapping[host_id] = (
+ host, host_state, host_log_history_mapping
+ )
+ else:
+ logging.info(
+ 'ignore host state %s since it is not in installing',
+ host_state
+ )
+ adapters = adapter_api.list_adapters(user=user)
+ adapter_mapping = {}
+ for adapter in adapters:
+ if 'id' not in adapter:
+ logging.error(
+ 'id not in adapter %s', adapter
+ )
+ continue
+ if 'package_installer' not in adapter:
+ logging.info(
+ 'package_installer not in adapter %s', adapter
+ )
+ continue
+ adapter_id = adapter['id']
+ adapter_mapping[adapter_id] = adapter
+ clusters = cluster_api.list_clusters(user=user)
+ cluster_mapping = {}
+ for cluster in clusters:
+ if 'id' not in cluster:
+ logging.error('id not in cluster %s', cluster)
+ continue
+ cluster_id = cluster['id']
+ if 'adapter_id' not in cluster:
+ logging.error(
+ 'adapter_id not in cluster %s',
+ cluster
+ )
+ continue
+ cluster_state = cluster_api.get_cluster_state(
+ cluster_id,
+ user=user
+ )
+ if 'state' not in cluster_state:
+ logging.error('state not in cluster state %s', cluster_state)
+ continue
+ cluster_mapping[cluster_id] = (cluster, cluster_state)
+ clusterhosts = cluster_api.list_clusterhosts(user=user)
+ clusterhost_mapping = {}
+ for clusterhost in clusterhosts:
+ if 'clusterhost_id' not in clusterhost:
+ logging.error(
+ 'clusterhost_id not in clusterhost %s',
+ clusterhost
+ )
+ continue
+ clusterhost_id = clusterhost['clusterhost_id']
+ if 'cluster_id' not in clusterhost:
+ logging.error(
+ 'cluster_id not in clusterhost %s',
+ clusterhost
+ )
+ continue
+ cluster_id = clusterhost['cluster_id']
+ if cluster_id not in cluster_mapping:
+ logging.info(
+ 'ignore clusterhost %s '
+ 'since the cluster_id '
+ 'is not in cluster_mapping %s',
+ clusterhost, cluster_mapping
+ )
+ continue
+ cluster, _ = cluster_mapping[cluster_id]
+ if 'flavor_name' not in cluster:
+ logging.error(
+                    'flavor_name is not in the cluster related to '
+                    'clusterhost %s',
+ clusterhost
+ )
+ continue
+ clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME
+ if clusterhost_dirname not in clusterhost:
+ logging.error(
+ '%s is not in clusterhost %s',
+ clusterhost_dirname, clusterhost
+ )
+ continue
+ adapter_id = cluster['adapter_id']
+ if adapter_id not in adapter_mapping:
+ logging.info(
+ 'ignore clusterhost %s '
+ 'since the adapter_id %s '
+                    'is not in adapter_mapping %s',
+ clusterhost, adapter_id, adapter_mapping
+ )
+ continue
+ adapter = adapter_mapping[adapter_id]
+ if 'package_installer' not in adapter:
+ logging.info(
+ 'ignore clusterhost %s '
+                'since the package_installer is not defined '
+ 'in adapter %s',
+ clusterhost, adapter
+ )
+ continue
+ package_installer = adapter['package_installer']
+ clusterhost['package_installer'] = package_installer
+ clusterhost['adapter_name'] = adapter['name']
+ clusterhost_state = cluster_api.get_clusterhost_self_state(
+ clusterhost_id, user=user
+ )
+ if 'state' not in clusterhost_state:
+ logging.error(
+ 'state not in clusterhost_state %s',
+ clusterhost_state
+ )
+ continue
+ if clusterhost_state['state'] == 'INSTALLING':
+ clusterhost_log_histories = (
+ cluster_api.get_clusterhost_log_histories(
+ clusterhost_id, user=user
+ )
+ )
+ clusterhost_log_history_mapping = {}
+ for clusterhost_log_history in clusterhost_log_histories:
+ if 'filename' not in clusterhost_log_history:
+ logging.error(
+ 'filename not in clusterhost_log_history %s',
+ clusterhost_log_history
+ )
+ continue
+ clusterhost_log_history_mapping[
+ clusterhost_log_history['filename']
+ ] = clusterhost_log_history
+ clusterhost_mapping[clusterhost_id] = (
+ clusterhost, clusterhost_state,
+ clusterhost_log_history_mapping
+ )
+ else:
+ logging.info(
+ 'ignore clusterhost state %s '
+ 'since it is not in installing',
+ clusterhost_state
+ )
+
+ progress_calculator.update_host_progress(
+ host_mapping)
+ for host_id, (host, host_state, host_log_history_mapping) in (
+ host_mapping.items()
+ ):
+ host_api.update_host_state(
+ host_id, user=user,
+ percentage=host_state.get('percentage', 0),
+ message=host_state.get('message', ''),
+ severity=host_state.get('severity', 'INFO')
+ )
+ for filename, host_log_history in (
+ host_log_history_mapping.items()
+ ):
+ host_api.add_host_log_history(
+ host_id, filename=filename, user=user,
+ position=host_log_history.get('position', 0),
+ percentage=host_log_history.get('percentage', 0),
+ partial_line=host_log_history.get('partial_line', ''),
+ message=host_log_history.get('message', ''),
+ severity=host_log_history.get('severity', 'INFO'),
+ line_matcher_name=host_log_history.get(
+ 'line_matcher_name', 'start'
+ )
+ )
+ progress_calculator.update_clusterhost_progress(
+ clusterhost_mapping)
+ for (
+ clusterhost_id,
+ (clusterhost, clusterhost_state, clusterhost_log_history_mapping)
+ ) in (
+ clusterhost_mapping.items()
+ ):
+ cluster_api.update_clusterhost_state(
+ clusterhost_id, user=user,
+ percentage=clusterhost_state.get('percentage', 0),
+ message=clusterhost_state.get('message', ''),
+ severity=clusterhost_state.get('severity', 'INFO')
+ )
+ for filename, clusterhost_log_history in (
+ clusterhost_log_history_mapping.items()
+ ):
+ cluster_api.add_clusterhost_log_history(
+ clusterhost_id, user=user, filename=filename,
+ position=clusterhost_log_history.get('position', 0),
+ percentage=clusterhost_log_history.get('percentage', 0),
+ partial_line=clusterhost_log_history.get(
+ 'partial_line', ''),
+ message=clusterhost_log_history.get('message', ''),
+ severity=clusterhost_log_history.get('severity', 'INFO'),
+ line_matcher_name=(
+ clusterhost_log_history.get(
+ 'line_matcher_name', 'start'
+ )
+ )
+ )
+ progress_calculator.update_cluster_progress(
+ cluster_mapping)
+ for cluster_id, (cluster, cluster_state) in cluster_mapping.items():
+ cluster_api.update_cluster_state(
+ cluster_id, user=user
+ )
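
update_progress takes no arguments and is designed for periodic runs; a sketch of a one-shot invocation, assuming the same init sequence the cli entry point above uses:

    from compass.actions import update_progress
    from compass.db.api import database
    from compass.utils import flags
    from compass.utils import logsetting

    flags.init()
    logsetting.init()
    database.init()
    # non-blocking: returns immediately if another run holds the
    # 'log_progressing' lock
    update_progress.update_progress()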
diff --git a/compass-deck/actions/util.py b/compass-deck/actions/util.py
new file mode 100644
index 0000000..4d9f855
--- /dev/null
+++ b/compass-deck/actions/util.py
@@ -0,0 +1,342 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provide util for actions
+
+ .. moduleauthor:: Xiaodong Wang ,xiaodongwang@huawei.com>
+"""
+import logging
+import redis
+
+from contextlib import contextmanager
+
+from compass.db.api import adapter_holder as adapter_db
+from compass.db.api import cluster as cluster_db
+from compass.db.api import host as host_db
+from compass.db.api import machine as machine_db
+from compass.deployment.utils import constants as const
+
+
+@contextmanager
+def lock(lock_name, blocking=True, timeout=10):
+ """acquire a lock to do some actions.
+
+    The lock is identified by lock_name and is shared across the whole
+    distributed system.
+ """
+    # TODO(xicheng): in future we should explicitly specify which redis
+    # server to talk to so that the lock works across distributed
+    # systems.
+ redis_instance = redis.Redis()
+ instance_lock = redis_instance.lock(lock_name, timeout=timeout)
+ owned = False
+ try:
+ locked = instance_lock.acquire(blocking=blocking)
+ if locked:
+ owned = True
+ logging.debug('acquired lock %s', lock_name)
+ yield instance_lock
+ else:
+            logging.info('lock %s is already held', lock_name)
+ yield None
+
+ except Exception as error:
+ logging.info(
+            'redis failed to acquire the lock %s', lock_name)
+ logging.exception(error)
+ yield None
+
+ finally:
+ if owned:
+ instance_lock.acquired_until = 0
+ instance_lock.release()
+ logging.debug('released lock %s', lock_name)
+ else:
+ logging.debug('nothing to release %s', lock_name)
+
+
+class ActionHelper(object):
+
+ @staticmethod
+ def get_adapter_info(adapter_id, cluster_id, user):
+ """Get adapter information. Return a dictionary as below,
+
+ {
+ "id": 1,
+ "name": "xxx",
+ "flavors": [
+ {
+ "flavor_name": "xxx",
+ "roles": ['xxx', 'yyy', ...],
+ "template": "xxx.tmpl"
+ },
+ ...
+ ],
+ "metadata": {
+ "os_config": {
+ ...
+ },
+ "package_config": {
+ ...
+ }
+ },
+ "os_installer": {
+ "name": "cobbler",
+ "settings": {....}
+ },
+ "pk_installer": {
+ "name": "chef",
+ "settings": {....}
+ },
+ ...
+ }
+ To view a complete output, please refer to backend doc.
+ """
+
+ adapter_info = adapter_db.get_adapter(adapter_id, user=user)
+ metadata = cluster_db.get_cluster_metadata(cluster_id, user=user)
+ adapter_info.update({const.METADATA: metadata})
+
+ for flavor_info in adapter_info[const.FLAVORS]:
+ roles = flavor_info[const.ROLES]
+ flavor_info[const.ROLES] = ActionHelper._get_role_names(roles)
+
+ return adapter_info
+
+ @staticmethod
+ def _get_role_names(roles):
+ return [role[const.NAME] for role in roles]
+
+ @staticmethod
+ def get_cluster_info(cluster_id, user):
+ """Get cluster information.Return a dictionary as below,
+
+ {
+ "id": 1,
+ "adapter_id": 1,
+ "os_version": "CentOS-6.5-x86_64",
+ "name": "cluster_01",
+ "flavor": {
+ "flavor_name": "zzz",
+ "template": "xx.tmpl",
+ "roles": [...]
+            },
+ "os_config": {..},
+ "package_config": {...},
+ "deployed_os_config": {},
+ "deployed_package_config": {},
+ "owner": "xxx"
+ }
+ """
+
+ cluster_info = cluster_db.get_cluster(cluster_id, user=user)
+
+ # convert roles retrieved from db into a list of role names
+ roles_info = cluster_info.setdefault(
+ const.FLAVOR, {}).setdefault(const.ROLES, [])
+ cluster_info[const.FLAVOR][const.ROLES] = \
+ ActionHelper._get_role_names(roles_info)
+
+ # get cluster config info
+ cluster_config = cluster_db.get_cluster_config(cluster_id, user=user)
+ cluster_info.update(cluster_config)
+
+ deploy_config = cluster_db.get_cluster_deployed_config(cluster_id,
+ user=user)
+ cluster_info.update(deploy_config)
+
+ return cluster_info
+
+ @staticmethod
+ def get_hosts_info(cluster_id, hosts_id_list, user):
+ """Get hosts information. Return a dictionary as below,
+
+        {
+            "hosts": {
+                1($host_id): {
+                    "reinstall_os": True,
+                    "mac": "xxx",
+                    "name": "xxx",
+                    "roles": [xxx, yyy],
+                    "networks": {
+                        "eth0": {
+                            "ip": "192.168.1.1",
+                            "netmask": "255.255.255.0",
+                            "is_mgmt": True,
+                            "is_promiscuous": False,
+                            "subnet": "192.168.1.0/24"
+                        },
+                        "eth1": {...}
+                    },
+                    "os_config": {},
+                    "package_config": {},
+                    "deployed_os_config": {},
+                    "deployed_package_config": {}
+                },
+                2: {...},
+                ....
+            }
+        }
+ """
+
+ hosts_info = {}
+ for host_id in hosts_id_list:
+ info = cluster_db.get_cluster_host(cluster_id, host_id, user=user)
+ logging.debug("checking on info %r %r" % (host_id, info))
+
+ info[const.ROLES] = ActionHelper._get_role_names(info[const.ROLES])
+
+ # TODO(grace): Is following line necessary??
+ info.setdefault(const.ROLES, [])
+
+ config = cluster_db.get_cluster_host_config(cluster_id,
+ host_id,
+ user=user)
+ info.update(config)
+
+ networks = info[const.NETWORKS]
+ networks_dict = {}
+ # Convert networks from list to dictionary format
+ for entry in networks:
+ nic_info = {}
+ nic_info = {
+ entry[const.NIC]: {
+ const.IP_ADDR: entry[const.IP_ADDR],
+ const.NETMASK: entry[const.NETMASK],
+ const.MGMT_NIC_FLAG: entry[const.MGMT_NIC_FLAG],
+ const.PROMISCUOUS_FLAG: entry[const.PROMISCUOUS_FLAG],
+ const.SUBNET: entry[const.SUBNET]
+ }
+ }
+ networks_dict.update(nic_info)
+
+ info[const.NETWORKS] = networks_dict
+
+ hosts_info[host_id] = info
+
+ return hosts_info
+
+ @staticmethod
+ def save_deployed_config(deployed_config, user):
+ """Save deployed config."""
+ cluster_config = deployed_config[const.CLUSTER]
+ cluster_id = cluster_config[const.ID]
+ del cluster_config[const.ID]
+
+ cluster_db.update_cluster_deployed_config(cluster_id, user=user,
+ **cluster_config)
+
+ hosts_id_list = deployed_config[const.HOSTS].keys()
+ for host_id in hosts_id_list:
+ config = deployed_config[const.HOSTS][host_id]
+ cluster_db.update_cluster_host_deployed_config(cluster_id,
+ host_id,
+ user=user,
+ **config)
+
+ @staticmethod
+ def update_state(
+ cluster_id, host_id_list, user, **kwargs
+ ):
+ # update all clusterhosts state
+ for host_id in host_id_list:
+ cluster_db.update_cluster_host_state(
+ cluster_id,
+ host_id,
+ user=user,
+ **kwargs
+ )
+
+ # update cluster state
+ cluster_db.update_cluster_state(
+ cluster_id,
+ user=user,
+ **kwargs
+ )
+
+ @staticmethod
+ def delete_cluster(
+ cluster_id, host_id_list, user, delete_underlying_host=False
+ ):
+ """Delete cluster.
+
+ If delete_underlying_host is set, underlying hosts will also
+ be deleted.
+ """
+ if delete_underlying_host:
+ for host_id in host_id_list:
+ host_db.del_host(
+ host_id, True, True, user=user
+ )
+ cluster_db.del_cluster(
+ cluster_id, True, True, user=user
+ )
+
+ @staticmethod
+ def delete_cluster_host(
+ cluster_id, host_id, user, delete_underlying_host=False
+ ):
+ """Delete clusterhost.
+
+ If delete_underlying_host set, also delete underlying host.
+ """
+ if delete_underlying_host:
+ host_db.del_host(
+ host_id, True, True, user=user
+ )
+ cluster_db.del_cluster_host(
+ cluster_id, host_id, True, True, user=user
+ )
+
+ @staticmethod
+ def delete_host(host_id, user):
+ host_db.del_host(
+ host_id, True, True, user=user
+ )
+
+ @staticmethod
+ def host_ready(host_id, from_database_only, user):
+ """Trigger host ready."""
+ host_db.update_host_state_internal(
+ host_id, from_database_only=from_database_only,
+ user=user, ready=True
+ )
+
+ @staticmethod
+ def cluster_host_ready(
+ cluster_id, host_id, from_database_only, user
+ ):
+ """Trigger clusterhost ready."""
+ cluster_db.update_cluster_host_state_internal(
+ cluster_id, host_id, from_database_only=from_database_only,
+ user=user, ready=True
+ )
+
+ @staticmethod
+ def is_cluster_os_ready(cluster_id, user=None):
+ return cluster_db.is_cluster_os_ready(cluster_id, user=user)
+
+ @staticmethod
+ def cluster_ready(cluster_id, from_database_only, user):
+ """Trigger cluster ready."""
+ cluster_db.update_cluster_state_internal(
+ cluster_id, from_database_only=from_database_only,
+ user=user, ready=True
+ )
+
+ @staticmethod
+ def get_machine_IPMI(machine_id, user):
+ machine_info = machine_db.get_machine(machine_id, user=user)
+ return machine_info[const.IPMI_CREDS]
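
The lock context manager above yields the redis lock object on success and None on failure, which is why every caller checks the yielded value before acting; a minimal sketch (do_serialized_work is a hypothetical placeholder for the guarded action):

    from compass.actions import util

    # blocking=False returns immediately instead of waiting on redis
    with util.lock('serialized_action', blocking=False, timeout=30) as lock:
        if not lock:
            raise Exception('failed to acquire lock serialized_action')
        # critical section: runs on at most one worker at a time
        do_serialized_work()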
diff --git a/compass-deck/api/__init__.py b/compass-deck/api/__init__.py
new file mode 100644
index 0000000..784fe23
--- /dev/null
+++ b/compass-deck/api/__init__.py
@@ -0,0 +1,42 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+from flask import Blueprint
+from flask.ext.login import LoginManager
+from flask import Flask
+
+# from compass.api.v1.api import v1_app
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+app = Flask(__name__)
+app.debug = True
+# blueprint = Blueprint('v2_app', __name__)
+# app.register_blueprint(v1_app, url_prefix='/v1.0')
+# app.register_blueprint(blueprint, url_prefix='/api')
+
+
+app.config['SECRET_KEY'] = 'abcd'
+app.config['AUTH_HEADER_NAME'] = setting.USER_AUTH_HEADER_NAME
+app.config['REMEMBER_COOKIE_DURATION'] = (
+ datetime.timedelta(
+ seconds=util.parse_time_interval(setting.USER_TOKEN_DURATION)
+ )
+)
+
+login_manager = LoginManager()
+login_manager.login_view = 'login'
+login_manager.init_app(app)
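
This module only builds and configures the Flask app; serving it is left to bin/runserver.py or the wsgi wrapper. A sketch of a development run, assuming that importing compass.api.api is what registers the URL handlers on the app (bind address and port are illustrative):

    import compass.api.api  # registers the URL handlers on app
    from compass.api import app

    if __name__ == '__main__':
        # development server only; production runs behind compass_wsgi.py
        app.run(host='0.0.0.0', port=8080)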
diff --git a/compass-deck/api/api. b/compass-deck/api/api.
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/api/api.
diff --git a/compass-deck/api/api.py b/compass-deck/api/api.py
new file mode 100644
index 0000000..e1cdd39
--- /dev/null
+++ b/compass-deck/api/api.py
@@ -0,0 +1,3391 @@
+#!/usr/bin/python
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define all the RestfulAPI entry points."""
+
+import datetime
+import functools
+import logging
+import netaddr
+import requests
+import simplejson as json
+
+from flask.ext.login import current_user
+from flask.ext.login import login_required
+from flask.ext.login import login_user
+from flask.ext.login import logout_user
+from flask import request
+
+from compass.api import app
+from compass.api import auth_handler
+from compass.api import exception_handler
+from compass.api import utils
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import cluster as cluster_api
+from compass.db.api import database
+from compass.db.api import health_check_report as health_report_api
+from compass.db.api import host as host_api
+from compass.db.api import machine as machine_api
+from compass.db.api import metadata_holder as metadata_api
+from compass.db.api import network as network_api
+from compass.db.api import permission as permission_api
+from compass.db.api import switch as switch_api
+from compass.db.api import user as user_api
+from compass.db.api import user_log as user_log_api
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+def log_user_action(func):
+ """decorator used to log api request url."""
+ @functools.wraps(func)
+ def decorated_api(*args, **kwargs):
+ # TODO(xicheng): save request args for GET
+ # and request data for POST/PUT.
+ user_log_api.log_user_action(current_user.id, request.path)
+ return func(*args, **kwargs)
+ return decorated_api
+
+
+def update_user_token(func):
+ """decorator used to update user token expire time after api request."""
+ @functools.wraps(func)
+ def decorated_api(*args, **kwargs):
+ response = func(*args, **kwargs)
+ expire_timestamp = (
+ datetime.datetime.now() + app.config['REMEMBER_COOKIE_DURATION']
+ )
+ user_api.record_user_token(
+ current_user.token, expire_timestamp, user=current_user
+ )
+ return response
+ return decorated_api
+
+
+def _clean_data(data, keys):
+ """remove keys from dict."""
+ for key in keys:
+ if key in data:
+ del data[key]
+
+
+def _replace_data(data, key_mapping):
+ """replace key names in dict."""
+ for key, replaced_key in key_mapping.items():
+ if key in data:
+ data[replaced_key] = data[key]
+ del data[key]
+
+
+def _get_data(data, key):
+ """get key's value from request arg dict.
+
+ When the value is list, return the element in the list
+ if the list size is one. If the list size is greater than one,
+ raise exception_handler.BadRequest.
+
+ Example: data = {'a': ['b'], 'b': 5, 'c': ['d', 'e'], 'd': []}
+ _get_data(data, 'a') == 'b'
+ _get_data(data, 'b') == 5
+ _get_data(data, 'c') raises exception_handler.BadRequest
+ _get_data(data, 'd') == None
+ _get_data(data, 'e') == None
+
+ Usage: Used to parse the key-value pair in request.args to expected types.
+ Depends on the different flask plugins and what kind of parameters
+ passed in, the request.args format may be as below:
+        {'a': 'b'} or {'a': ['b']}. _get_data forces translating the
+        request.args to the format {'a': 'b'}. It raises an exception when
+        a parameter is declared multiple times.
+ """
+ if key in data:
+ if isinstance(data[key], list):
+ if data[key]:
+ if len(data[key]) == 1:
+ return data[key][0]
+ else:
+ raise exception_handler.BadRequest(
+                        '%s declared multiple times %s in request' % (
+ key, data[key]
+ )
+ )
+ else:
+ return None
+ else:
+ return data[key]
+ else:
+ return None
+
+
+def _get_data_list(data, key):
+ """get key's value as list from request arg dict.
+
+    If the value type is list, return it; otherwise return a list
+    whose only element is the value got from the dict.
+
+ Example: data = {'a': ['b'], 'b': 5, 'c': ['d', 'e'], 'd': []}
+ _get_data_list(data, 'a') == ['b']
+ _get_data_list(data, 'b') == [5]
+ _get_data_list(data, 'd') == []
+ _get_data_list(data, 'e') == []
+
+ Usage: Used to parse the key-value pair in request.args to expected types.
+ Depends on the different flask plugins and what kind of parameters
+ passed in, the request.args format may be as below:
+        {'a': 'b'} or {'a': ['b']}. _get_data_list forces translating the
+        request.args to the format {'a': ['b']}. It accepts the case that
+        a parameter is declared multiple times.
+ """
+ if key in data:
+ if isinstance(data[key], list):
+ return data[key]
+ else:
+ return [data[key]]
+ else:
+ return []
+
+
+def _get_request_data():
+ """Convert reqeust data from string to python dict.
+
+ If the request data is not json formatted, raises
+ exception_handler.BadRequest.
+ If the request data is not json formatted dict, raises
+ exception_handler.BadRequest
+ If the request data is empty, return default as empty dict.
+
+ Usage: It is used to add or update a single resource.
+ """
+ if request.data:
+ try:
+ data = json.loads(request.data)
+ except Exception:
+ raise exception_handler.BadRequest(
+ 'request data is not json formatted: %s' % request.data
+ )
+ if not isinstance(data, dict):
+ raise exception_handler.BadRequest(
+ 'request data is not json formatted dict: %s' % request.data
+ )
+ return data
+ else:
+ return {}
+
+
+def _get_request_data_as_list():
+ """Convert reqeust data from string to python list.
+
+ If the request data is not json formatted, raises
+ exception_handler.BadRequest.
+ If the request data is not json formatted list, raises
+ exception_handler.BadRequest.
+ If the request data is empty, return default as empty list.
+
+ Usage: It is used to batch add or update a list of resources.
+ """
+ if request.data:
+ try:
+ data = json.loads(request.data)
+ except Exception:
+ raise exception_handler.BadRequest(
+ 'request data is not json formatted: %s' % request.data
+ )
+ if not isinstance(data, list):
+ raise exception_handler.BadRequest(
+ 'request data is not json formatted list: %s' % request.data
+ )
+ return data
+ else:
+ return []
+
+
+def _bool_converter(value):
+ """Convert string value to bool.
+
+    This function is used to convert value in request args to expected type.
+ If the key exists in request args but the value is not set, it means the
+ value should be true.
+
+ Examples:
+        /<request_path>?is_admin parsed to {'is_admin': None} and it should
+ be converted to {'is_admin': True}.
+ /<request_path>?is_admin=0 parsed and converted to {'is_admin': False}.
+ /<request_path>?is_admin=1 parsed and converted to {'is_admin': True}.
+ """
+ if not value:
+ return True
+ if value in ['False', 'false', '0']:
+ return False
+ if value in ['True', 'true', '1']:
+ return True
+ raise exception_handler.BadRequest(
+ '%r type is not bool' % value
+ )
+
+
+def _int_converter(value):
+ """Convert string value to int.
+
+ We do not use the int converter default exception since we want to make
+ sure the exact http response code.
+
+ Raises: exception_handler.BadRequest if value can not be parsed to int.
+
+ Examples:
+ /<request_path>?count=10 parsed to {'count': '10'} and it should be
+ converted to {'count': 10}.
+ """
+ try:
+ return int(value)
+ except Exception:
+ raise exception_handler.BadRequest(
+ '%r type is not int' % value
+ )
+
+
+def _get_request_args(**kwargs):
+ """Get request args as dict.
+
+ The value in the dict is converted to expected type.
+
+ Args:
+ kwargs: for each key, the value is the type converter.
+ """
+ args = dict(request.args)
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'origin request args: %s', args
+ )
+ for key, value in args.items():
+ if key in kwargs:
+ converter = kwargs[key]
+ if isinstance(value, list):
+ args[key] = [converter(item) for item in value]
+ else:
+ args[key] = converter(value)
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'request args: %s', args
+ )
+ return args
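+
+# Illustrative usage, assuming werkzeug's MultiDict semantics for
+# request.args: a request like /hosts?os_id=1&os_id=2 handled with
+#     _get_request_args(os_id=_int_converter)
+# yields {'os_id': [1, 2]}; only keys with a registered converter are
+# converted, all other args pass through unchanged.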
+
+
+def _group_data_action(data, **data_callbacks):
+ """Group api actions and pass data to grouped action callback.
+
+ Example:
+ data = {
+ 'add_hosts': [{'name': 'a'}, {'name': 'b'}],
+ 'update_hosts': {'c': {'mac': '123'}},
+ 'remove_hosts': ['d', 'e']
+ }
+ data_callbacks = {
+ 'add_hosts': update_cluster_action,
+ 'update_hosts': update_cluster_action,
+ 'remove_hosts': update_cluster_action
+ }
+ it converts to update_cluster_action(
+ add_hosts=[{'name': 'a'}, {'name': 'b'}],
+ update_hosts={'c': {'mac': '123'}},
+ remove_hosts=['d', 'e']
+ )
+
+ Raises:
+ exception_handler.BadRequest if data is empty.
+ exception_handler.BadMethod if some keys in data are
+ not in data_callbacks.
+ exception_handler.BadRequest if it groups to multiple
+ callbacks.
+ """
+ if not data:
+ raise exception_handler.BadRequest(
+ 'no action to take'
+ )
+ unsupported_keys = list(set(data) - set(data_callbacks))
+ if unsupported_keys:
+ raise exception_handler.BadMethod(
+ 'unsupported actions: %s' % unsupported_keys
+ )
+ callback_datas = {}
+ for data_key, data_value in data.items():
+ callback = data_callbacks[data_key]
+ callback_datas.setdefault(id(callback), {})[data_key] = data_value
+ if len(callback_datas) > 1:
+ raise exception_handler.BadRequest(
+ 'multi actions are not supported'
+ )
+ callback_ids = {}
+ for data_key, data_callback in data_callbacks.items():
+ callback_ids[id(data_callback)] = data_callback
+ for callback_id, callback_data in callback_datas.items():
+ return callback_ids[callback_id](**callback_data)
+
+
+def _wrap_response(func, response_code):
+ """wrap function response to json formatted http response."""
+ def wrapped_func(*args, **kwargs):
+ return utils.make_json_response(
+ response_code,
+ func(*args, **kwargs)
+ )
+ return wrapped_func
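+
+# Illustrative usage: _wrap_response is typically combined with
+# functools.partial so that a db api call becomes a callback returning a
+# json response with a fixed status code, e.g.
+#     poll_switch_func = _wrap_response(
+#         functools.partial(switch_api.poll_switch, switch_id,
+#                           user=current_user),
+#         202
+#     )
+# as done in the action handlers below.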
+
+
+def _reformat_host_networks(networks):
+ """Reformat networks from list to dict.
+
+ The key in the dict is the value of the key 'interface'
+ in each network.
+
+ Example: networks = [{'interface': 'eth0', 'ip': '10.1.1.1'}]
+ is reformatted to {
+ 'eth0': {'interface': 'eth0', 'ip': '10.1.1.1'}
+ }
+
+ Usage: The networks got from the db api are a list of networks.
+ For easier parsing in the json frontend, we convert the list
+ into a dict keyed by interface for easy reference.
+ """
+ network_mapping = {}
+ for network in networks:
+ if 'interface' in network:
+ network_mapping[network['interface']] = network
+ return network_mapping
+
+
+def _reformat_host(host):
+ """Reformat host's networks."""
+ if isinstance(host, list):
+ return [_reformat_host(item) for item in host]
+ if 'networks' in host:
+ host['networks'] = _reformat_host_networks(host['networks'])
+ return host
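+
+# Illustrative usage: _reformat_host accepts a single host dict or a list
+# of hosts, e.g.
+#     _reformat_host({'networks': [{'interface': 'eth0', 'ip': '10.1.1.1'}]})
+# returns {'networks': {'eth0': {'interface': 'eth0', 'ip': '10.1.1.1'}}}.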
+
+
+def _login(use_cookie):
+ """User login helper function.
+
+ The request data should contain at least 'email' and 'password'.
+ The cookie expiration duration is defined in flask app config.
+ If user is not authenticated, it raises Unauthorized exception.
+ """
+ data = _get_request_data()
+ if 'email' not in data or 'password' not in data:
+ raise exception_handler.BadRequest(
+ 'missing email or password in data'
+ )
+ expire_timestamp = (
+ datetime.datetime.now() + app.config['REMEMBER_COOKIE_DURATION']
+ )
+ data['expire_timestamp'] = expire_timestamp
+ user = auth_handler.authenticate_user(**data)
+ if not user.active:
+ raise exception_handler.UserDisabled(
+ '%s is not activated' % user.email
+ )
+ if not login_user(user, remember=data.get('remember', False)):
+ raise exception_handler.UserDisabled('failed to login: %s' % user)
+
+ user_log_api.log_user_action(user.id, request.path)
+ response_data = user_api.record_user_token(
+ user.token, user.expire_timestamp, user=user
+ )
+ return utils.make_json_response(200, response_data)
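+
+# Illustrative request body (values are hypothetical): the login endpoints
+# below expect a json dict with required 'email' and 'password' keys and
+# an optional 'remember' flag, e.g.
+#     {"email": "user@example.com", "password": "secret", "remember": true}
+# any extra keys are passed through to auth_handler.authenticate_user.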
+
+
+@app.route('/users/token', methods=['POST'])
+def get_token():
+ """user login and return token."""
+ return _login(False)
+
+
+@app.route("/users/login", methods=['POST'])
+def login():
+ """User login."""
+ return _login(True)
+
+
+@app.route("/users/register", methods=['POST'])
+def register():
+ """register new user."""
+ data = _get_request_data()
+ data['is_admin'] = False
+ data['active'] = False
+ return utils.make_json_response(
+ 200, user_api.add_user(**data)
+ )
+
+
+@app.route('/users/logout', methods=['POST'])
+@login_required
+def logout():
+ """User logout."""
+ user_log_api.log_user_action(current_user.id, request.path)
+ response_data = user_api.clean_user_token(
+ current_user.token, user=current_user
+ )
+ logout_user()
+ return utils.make_json_response(200, response_data)
+
+
+@app.route("/users", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_users():
+ """list users.
+
+ Supported parameters: ['email', 'is_admin', 'active']
+ """
+ data = _get_request_args(
+ is_admin=_bool_converter,
+ active=_bool_converter
+ )
+ return utils.make_json_response(
+ 200, user_api.list_users(user=current_user, **data)
+ )
+
+
+@app.route("/users", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_user():
+ """add user.
+
+ Must parameters: ['email', 'password'],
+ Optional parameters: ['is_admin', 'active']
+ """
+ data = _get_request_data()
+ user_dict = user_api.add_user(user=current_user, **data)
+ return utils.make_json_response(
+ 200, user_dict
+ )
+
+
+@app.route("/users/<int:user_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_user(user_id):
+ """Get user by id."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200, user_api.get_user(user_id, user=current_user, **data)
+ )
+
+
+@app.route("/current-user", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_current_user():
+ """Get current user."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200, user_api.get_current_user(user=current_user, **data)
+ )
+
+
+@app.route("/users/<int:user_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_user(user_id):
+ """Update user.
+
+ Supported parameters by self: [
+ 'email', 'firstname', 'lastname', 'password'
+ ]
+ Supported parameters by admin ['is_admin', 'active']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_api.update_user(
+ user_id,
+ user=current_user,
+ **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_user(user_id):
+ """Delete user.
+
+ Delete is only permitted by admin user.
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_api.del_user(
+ user_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>/permissions", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_user_permissions(user_id):
+ """Get user permissions."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200, user_api.get_permissions(user_id, user=current_user, **data)
+ )
+
+
+@app.route("/users/<int:user_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_user_action(user_id):
+ """Take user action.
+
+ Support actions: [
+ 'add_permissions', 'remove_permissions',
+ 'set_permissions', 'enable_user',
+ 'disable_user'
+ ]
+ """
+ data = _get_request_data()
+ update_permissions_func = _wrap_response(
+ functools.partial(
+ user_api.update_permissions, user_id, user=current_user,
+ ),
+ 200
+ )
+
+ def disable_user(disable_user=None):
+ return user_api.update_user(
+ user_id, user=current_user, active=False
+ )
+
+ disable_user_func = _wrap_response(
+ disable_user,
+ 200
+ )
+
+ def enable_user(enable_user=None):
+ return user_api.update_user(
+ user_id, user=current_user, active=True
+ )
+
+ enable_user_func = _wrap_response(
+ enable_user,
+ 200
+ )
+ return _group_data_action(
+ data,
+ add_permissions=update_permissions_func,
+ remove_permissions=update_permissions_func,
+ set_permissions=update_permissions_func,
+ enable_user=enable_user_func,
+ disable_user=disable_user_func
+ )
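+
+# Illustrative action bodies (hypothetical values): posting
+#     {"disable_user": null}
+# deactivates the user, while {"add_permissions": [...]} is routed to
+# user_api.update_permissions; keys mapping to different callbacks cannot
+# be mixed in one request (see _group_data_action).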
+
+
+@app.route(
+ '/users/<int:user_id>/permissions/<int:permission_id>',
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_user_permission(user_id, permission_id):
+ """Get a specific user permission."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ user_api.get_permission(
+ user_id, permission_id, user=current_user,
+ **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>/permissions", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_user_permission(user_id):
+ """Add permission to a specific user.
+
+ add_user_permission is only permitted by admin user.
+ Must parameters: ['permission_id']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_api.add_permission(
+ user_id, user=current_user,
+ **data
+ )
+ )
+
+
+@app.route(
+ '/users/<int:user_id>/permissions/<permission_id>',
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_user_permission(user_id, permission_id):
+ """Delete a specific user permission."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_api.del_permission(
+ user_id, permission_id, user=current_user,
+ **data
+ )
+ )
+
+
+@app.route("/permissions", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_permissions():
+ """List permissions.
+
+ Supported filters: ['id', 'name', 'alias', 'description']
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ permission_api.list_permissions(user=current_user, **data)
+ )
+
+
+@app.route("/permissions/<int:permission_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_permission(permission_id):
+ """Get permission."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ permission_api.get_permission(permission_id, user=current_user, **data)
+ )
+
+
+def _filter_timestamp(data):
+ """parse timestamp related params to db api understandable params.
+
+ Example:
+ {'timestamp_start': '2005-12-23 12:00:00'} to
+ {'timestamp': {'ge': timestamp('2005-12-23 12:00:00')}},
+ {'timestamp_end': '2005-12-23 12:00:00'} to
+ {'timestamp': {'le': timestamp('2005-12-23 12:00:00')}},
+ {'timestamp_range': '2005-12-23 12:00:00,2005-12-24 12:00:00'} to
+ {'timestamp': {'between': [(
+ timestamp('2005-12-23 12:00:00'),
+ timestamp('2005-12-24 12:00:00')
+ )]
+ }}
+
+ The timestamp related params can be declared multiple times.
+ """
+ timestamp_filter = {}
+ start = _get_data(data, 'timestamp_start')
+ if start is not None:
+ timestamp_filter['ge'] = util.parse_datetime(
+ start, exception_handler.BadRequest
+ )
+ end = _get_data(data, 'timestamp_end')
+ if end is not None:
+ timestamp_filter['le'] = util.parse_datetime(
+ end, exception_handler.BadRequest)
+ timestamp_ranges = _get_data_list(data, 'timestamp_range')
+ if timestamp_ranges:
+ timestamp_filter['between'] = []
+ for value in timestamp_ranges:
+ timestamp_filter['between'].append(
+ util.parse_datetime_range(
+ value, exception_handler.BadRequest
+ )
+ )
+ data['timestamp'] = timestamp_filter
+ _clean_data(
+ data,
+ [
+ 'timestamp_start', 'timestamp_end',
+ 'timestamp_range'
+ ]
+ )
+
+
+@app.route("/users/logs", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_all_user_actions():
+ """List all users actions.
+
+ Supported filters: [
+ 'timestamp_start', 'timestamp_end', 'timestamp_range',
+ 'user_email'
+ ]
+ """
+ data = _get_request_args()
+ _filter_timestamp(data)
+ return utils.make_json_response(
+ 200,
+ user_log_api.list_actions(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>/logs", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_user_actions(user_id):
+ """List user actions for specific user.
+
+ Supported filters: [
+ 'timestamp_start', 'timestamp_end', 'timestamp_range',
+ ]
+ """
+ data = _get_request_args()
+ _filter_timestamp(data)
+ return utils.make_json_response(
+ 200,
+ user_log_api.list_user_actions(
+ user_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/users/logs", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_all_user_actions():
+ """Delete all user actions."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_log_api.del_actions(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>/logs", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_user_actions(user_id):
+ """Delete user actions for specific user."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_log_api.del_user_actions(
+ user_id, user=current_user, **data
+ )
+ )
+
+
+def _filter_switch_ip(data):
+ """filter switch ip related params to db/api understandable format.
+
+ Examples:
+ {'switchIp': '10.0.0.1'} to {'ip_int': {'eq': [int of '10.0.0.1']}}
+ {'switchIpStart': '10.0.0.1'} to
+ {'ip_int': {'ge': int of '10.0.0.1'}}
+ {'switchIpEnd': '10.0.0.1'} to
+ {'ip_int': {'lt': int of '10.0.0.1'}}
+ {'switchIpNetwork': '10.0.0.0/24'} to
+ {'ip_int': {'between': [(int of '10.0.0.0', int of '10.0.0.255')]}}
+ {'switchIpRange': '10.0.0.1,10.0.0.254'} to
+ {'ip_int': {'between': [(int of '10.0.0.1', int of '10.0.0.254')]}}
+
+ The switch ip related params can be declared multiple times.
+ """
+ ip_filter = {}
+ switch_ips = _get_data_list(data, 'switchIp')
+ if switch_ips:
+ ip_filter['eq'] = []
+ for switch_ip in switch_ips:
+ ip_filter['eq'].append(long(netaddr.IPAddress(switch_ip)))
+ switch_start = _get_data(data, 'switchIpStart')
+ if switch_start is not None:
+ ip_filter['ge'] = long(netaddr.IPAddress(switch_start))
+ switch_end = _get_data(data, 'switchIpEnd')
+ if switch_end is not None:
+ ip_filter['lt'] = long(netaddr.IPAddress(switch_end))
+ switch_nets = _get_data_list(data, 'switchIpNetwork')
+ if switch_nets:
+ ip_filter['between'] = []
+ for switch_net in switch_nets:
+ network = netaddr.IPNetwork(switch_net)
+ ip_filter['between'].append((network.first, network.last))
+ switch_ranges = _get_data_list(data, 'switchIpRange')
+ if switch_ranges:
+ ip_filter.setdefault('between', [])
+ for switch_range in switch_ranges:
+ ip_start, ip_end = switch_range.split(',')
+ # append takes a single element, so the range goes in as one tuple
+ ip_filter['between'].append(
+ (long(netaddr.IPAddress(ip_start)),
+ long(netaddr.IPAddress(ip_end)))
+ )
+ if ip_filter:
+ data['ip_int'] = ip_filter
+ _clean_data(
+ data,
+ [
+ 'switchIp', 'switchIpStart', 'switchIpEnd',
+ 'switchIpNetwork', 'switchIpRange'
+ ]
+ )
+
+
+@app.route("/switches", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switches():
+ """List switches.
+
+ Supported filters: [
+ 'switchIp', 'switchIpStart', 'switchIpEnd',
+ 'switchIpNetwork', 'switchIpRange', 'vendor', 'state'
+ ]
+ """
+ data = _get_request_args()
+ _filter_switch_ip(data)
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switches(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_switch(switch_id):
+ """Get switch."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200, switch_api.get_switch(switch_id, user=current_user, **data)
+ )
+
+
+@app.route("/switches", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_switch():
+ """add switch.
+
+ Must fields: ['ip']
+ Optional fields: [
+ 'credentials', 'vendor', 'state',
+ 'err_msg', 'filters'
+ ]
+ """
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.add_switch(user=current_user, **data)
+ )
+
+
+@app.route("/switchesbatch", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_switches():
+ """batch add switches.
+
+ The request data is a list of dicts. Each dict must contain ['ip']
+ and may contain [
+ 'credentials', 'vendor', 'state', 'err_msg', 'filters'
+ ]
+ """
+ data = _get_request_data_as_list()
+ for item_data in data:
+ _replace_data(item_data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.add_switches(
+ data=data, user=current_user
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_switch(switch_id):
+ """update switch.
+
+ Supported fields: [
+ 'ip', 'credentials', 'vendor', 'state',
+ 'err_msg', 'filters'
+ ]
+ """
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.update_switch(switch_id, user=current_user, **data)
+ )
+
+
+@app.route("/switches/<int:switch_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_switch(switch_id):
+ """patch switch.
+
+ Supported fields: [
+ 'credentials', 'filters'
+ ]
+ """
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.patch_switch(switch_id, user=current_user, **data)
+ )
+
+
+@app.route("/switches/<int:switch_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_switch(switch_id):
+ """delete switch."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.del_switch(switch_id, user=current_user, **data)
+ )
+
+
+@util.deprecated
+@app.route("/switch-filters", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switch_filters():
+ """List switch filters."""
+ data = _get_request_args()
+ _filter_switch_ip(data)
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switch_filters(
+ user=current_user, **data
+ )
+ )
+
+
+@util.deprecated
+@app.route("/switch-filters/<int:switch_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_switch_filters(switch_id):
+ """Get switch filters."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ switch_api.get_switch_filters(switch_id, user=current_user, **data)
+ )
+
+
+@util.deprecated
+@app.route("/switch-filters/<int:switch_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_switch_filters(switch_id):
+ """update switch filters."""
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.update_switch_filters(switch_id, user=current_user, **data)
+ )
+
+
+@util.deprecated
+@app.route("/switch-filters/<int:switch_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_switch_filters(switch_id):
+ """patch switch filters."""
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.patch_switch_filter(switch_id, user=current_user, **data)
+ )
+
+
+def _filter_switch_port(data):
+ """Generate switch machine filters by switch port related fields.
+
+ Examples:
+ {'port': 'ae20'} to {'port': {'eq': ['ae20']}}
+ {'portStart': 20, 'portPrefix': 'ae', 'portSuffix': ''} to
+ {'port': {'startswith': 'ae', 'endswith': '', 'resp_ge': 20}}
+ {'portEnd': 20, 'portPrefix': 'ae', 'portSuffix': ''} to
+ {'port': {'startswith': 'ae', 'endswith': '', 'resp_lt': 20}}
+ {'portRange': '20,40', 'portPrefix': 'ae', 'portSuffix': ''} to
+ {'port': {
+ 'startswith': 'ae', 'endswith': '', 'resp_range': [(20, 40)]
+ }}
+
+ For each switch machine's port, it extracts the portNumber from
+ '<portPrefix><portNumber><portSuffix>' and filters the returned switch
+ machines by these filters.
+ """
+ port_filter = {}
+ ports = _get_data_list(data, 'port')
+ if ports:
+ port_filter['eq'] = ports
+ port_start = _get_data(data, 'portStart')
+ if port_start is not None:
+ port_filter['resp_ge'] = int(port_start)
+ port_end = _get_data(data, 'portEnd')
+ if port_end is not None:
+ port_filter['resp_lt'] = int(port_end)
+ port_ranges = _get_data_list(data, 'portRange')
+ if port_ranges:
+ port_filter['resp_range'] = []
+ for port_range in port_ranges:
+ port_start, port_end = port_range.split(',')
+ port_filter['resp_range'].append(
+ (int(port_start), int(port_end))
+ )
+ port_prefix = _get_data(data, 'portPrefix')
+ if port_prefix:
+ port_filter['startswith'] = port_prefix
+ port_suffix = _get_data(data, 'portSuffix')
+ if port_suffix:
+ port_filter['endswith'] = port_suffix
+ if port_filter:
+ data['port'] = port_filter
+ _clean_data(
+ data,
+ [
+ 'portStart', 'portEnd', 'portRange',
+ 'portPrefix', 'portSuffix'
+ ]
+ )
+
+
+def _filter_general(data, key):
+ """Generate general filter for db/api returned list.
+
+ Supported filter types: [
+ 'resp_eq', 'resp_in', 'resp_le', 'resp_ge',
+ 'resp_gt', 'resp_lt', 'resp_match'
+ ]
+ """
+ general_filter = {}
+ general = _get_data_list(data, key)
+ if general:
+ general_filter['resp_in'] = general
+ data[key] = general_filter
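+
+# Illustrative usage (hypothetical value): calling
+#     _filter_general({'os_name': ['CentOS']}, 'os_name')
+# rewrites the dict in place to {'os_name': {'resp_in': ['CentOS']}} so
+# the db/api layer filters its response list by membership.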
+
+
+def _filter_machine_tag(data):
+ """Generate filter for machine tag.
+
+ Examples:
+ original returns:
+ [{'tag': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b3',
+ 'stack': '20'
+ }}, {'tag': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b2',
+ 'stack': '20'
+ }}]
+ filter: {'tag': 'room=205;rack=a2b3'}
+ filtered: [{'tag': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b3',
+ 'stack': '20'
+ }}]
+ """
+ tag_filter = {}
+ tags = _get_data_list(data, 'tag')
+ if tags:
+ tag_filter['resp_in'] = []
+ for tag in tags:
+ tag_filter['resp_in'].append(
+ util.parse_request_arg_dict(tag)
+ )
+ data['tag'] = tag_filter
+
+
+def _filter_machine_location(data):
+ """Generate filter for machine location.
+
+ Examples:
+ original returns:
+ [{'location': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b3',
+ 'stack': '20'
+ }},{'location': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b2',
+ 'stack': '20'
+ }}]
+ filter: {'location': 'room=205;rack=a2b3'}
+ filtered: [{'location': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b3',
+ 'stack': '20'
+ }}]
+ """
+ location_filter = {}
+ locations = _get_data_list(data, 'location')
+ if locations:
+ location_filter['resp_in'] = []
+ for location in locations:
+ location_filter['resp_in'].append(
+ util.parse_request_arg_dict(location)
+ )
+ data['location'] = location_filter
+
+
+@app.route("/switches/<int:switch_id>/machines", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switch_machines(switch_id):
+ """Get switch machines.
+
+ Supported filters: [
+ 'port', 'portStart', 'portEnd', 'portRange',
+ 'portPrefix', 'portSuffix', 'vlans', 'tag', 'location'
+ ]
+ """
+ data = _get_request_args(vlans=_int_converter)
+ _filter_switch_port(data)
+ _filter_general(data, 'vlans')
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switch_machines(
+ switch_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>/machines-hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switch_machines_hosts(switch_id):
+ """Get switch machines or hosts.
+
+ Supported filters: [
+ 'port', 'portStart', 'portEnd', 'portRange',
+ 'portPrefix', 'portSuffix', 'vlans', 'tag', 'location',
+ 'os_name', 'os_id'
+ ]
+
+ """
+ data = _get_request_args(vlans=_int_converter, os_id=_int_converter)
+ _filter_switch_port(data)
+ _filter_general(data, 'vlans')
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ _filter_general(data, 'os_name')
+ # TODO(xicheng): os_id filter should be removed later
+ _filter_general(data, 'os_id')
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switch_machines_hosts(
+ switch_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>/machines", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_switch_machine(switch_id):
+ """add switch machine.
+
+ Must fields: ['mac', 'port']
+ Optional fields: ['vlans', 'ipmi_credentials', 'tag', 'location']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.add_switch_machine(switch_id, user=current_user, **data)
+ )
+
+
+@app.route("/switches/machines", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_switch_machines():
+ """batch add switch machines.
+
+ The request data is a list of dicts, each containing switch machine
+ fields. Each dict must contain ['switch_ip', 'mac', 'port'] and
+ may contain ['vlans', 'ipmi_credentials', 'tag', 'location'].
+ """
+ data = _get_request_data_as_list()
+ return utils.make_json_response(
+ 200, switch_api.add_switch_machines(
+ data=data, user=current_user
+ )
+ )
+
+
+@app.route(
+ '/switches/<int:switch_id>/machines/<int:machine_id>',
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_switch_machine(switch_id, machine_id):
+ """get switch machine."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ switch_api.get_switch_machine(
+ switch_id, machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/switches/<int:switch_id>/machines/<int:machine_id>',
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_switch_machine(switch_id, machine_id):
+ """update switch machine.
+
+ Supported fields: [
+ 'port', 'vlans', 'ipmi_credentials', 'tag', 'location'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.update_switch_machine(
+ switch_id, machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/switches/<int:switch_id>/machines/<int:machine_id>',
+ methods=['PATCH']
+)
+@log_user_action
+@login_required
+@update_user_token
+def patch_switch_machine(switch_id, machine_id):
+ """patch switch machine.
+
+ Supported fields: [
+ 'vlans', 'ipmi_credentials', 'tag', 'location'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.patch_switch_machine(
+ switch_id, machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/switches/<int:switch_id>/machines/<int:machine_id>',
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_switch_machine(switch_id, machine_id):
+ """Delete switch machine."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.del_switch_machine(
+ switch_id, machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_switch_action(switch_id):
+ """take switch action.
+
+ Supported actions: [
+ 'find_machines', 'add_machines', 'remove_machines',
+ 'set_machines'
+ ]
+ """
+ data = _get_request_data()
+ poll_switch_func = _wrap_response(
+ functools.partial(
+ switch_api.poll_switch, switch_id, user=current_user,
+ ),
+ 202
+ )
+ update_switch_machines_func = _wrap_response(
+ functools.partial(
+ switch_api.update_switch_machines, switch_id, user=current_user,
+ ),
+ 200
+ )
+ return _group_data_action(
+ data,
+ find_machines=poll_switch_func,
+ add_machines=update_switch_machines_func,
+ remove_machines=update_switch_machines_func,
+ set_machines=update_switch_machines_func
+ )
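+
+# Illustrative action bodies (hypothetical values): posting
+#     {"find_machines": null}
+# triggers switch_api.poll_switch and returns 202 since polling runs
+# asynchronously, while {"set_machines": [...]} is handled by
+# switch_api.update_switch_machines and returns 200.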
+
+
+@app.route("/machines/<int:machine_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_machine_action(machine_id):
+ """take machine action.
+
+ Supported actions: ['tag', 'poweron', 'poweroff', 'reset']
+ """
+ data = _get_request_data()
+ tag_func = _wrap_response(
+ functools.partial(
+ machine_api.update_machine, machine_id, user=current_user,
+ ),
+ 200
+ )
+ poweron_func = _wrap_response(
+ functools.partial(
+ machine_api.poweron_machine, machine_id, user=current_user,
+ ),
+ 202
+ )
+ poweroff_func = _wrap_response(
+ functools.partial(
+ machine_api.poweroff_machine, machine_id, user=current_user,
+ ),
+ 202
+ )
+ reset_func = _wrap_response(
+ functools.partial(
+ machine_api.reset_machine, machine_id, user=current_user,
+ ),
+ 202
+ )
+ return _group_data_action(
+ data,
+ tag=tag_func,
+ poweron=poweron_func,
+ poweroff=poweroff_func,
+ reset=reset_func
+ )
+
+
+@app.route("/switch-machines", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switchmachines():
+ """List switch machines.
+
+ Supported filters: [
+ 'vlans', 'switchIp', 'switchIpStart',
+ 'switchIpEnd', 'switchIpRange', 'port',
+ 'portStart', 'portEnd', 'portRange',
+ 'location', 'tag', 'mac'
+ ]
+ """
+ data = _get_request_args(vlans=_int_converter)
+ _filter_switch_ip(data)
+ _filter_switch_port(data)
+ _filter_general(data, 'vlans')
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switchmachines(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches-machines-hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switchmachines_hosts():
+ """List switch machines or hosts.
+
+ Supported filters: [
+ 'vlans', 'switchIp', 'switchIpStart',
+ 'switchIpEnd', 'switchIpRange', 'port',
+ 'portStart', 'portEnd', 'portRange',
+ 'location', 'tag', 'mac', 'os_name'
+ ]
+
+ """
+ data = _get_request_args(vlans=_int_converter, os_id=_int_converter)
+ _filter_switch_ip(data)
+ _filter_switch_port(data)
+ _filter_general(data, 'vlans')
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ _filter_general(data, 'os_name')
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switchmachines_hosts(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/switch-machines/<int:switch_machine_id>',
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_switchmachine(switch_machine_id):
+ """get switch machine."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ switch_api.get_switchmachine(
+ switch_machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/switch-machines/<int:switch_machine_id>',
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_switchmachine(switch_machine_id):
+ """update switch machine.
+
+ Supported fields: [
+ 'port', 'vlans', 'ipmi_credentials', 'tag', 'location'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.update_switchmachine(
+ switch_machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route('/switch-machines/<int:switch_machine_id>', methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_switchmachine(switch_machine_id):
+ """patch switch machine.
+
+ Supported fields: [
+ 'vlans', 'ipmi_credentials', 'tag', 'location'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.patch_switchmachine(
+ switch_machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/switch-machines/<int:switch_machine_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_switchmachine(switch_machine_id):
+ """Delete switch machine."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.del_switchmachine(
+ switch_machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/machines", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_machines():
+ """List machines.
+
+ Supported filters: [
+ 'tag', 'location', 'mac'
+ ]
+ """
+ data = _get_request_args()
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ return utils.make_json_response(
+ 200,
+ machine_api.list_machines(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/machine/discovery", methods=['POST'])
+def switch_discovery():
+ """switch on/off hardware discovery"""
+ data = _get_request_args()
+
+
+@app.route("/machines", methods=['POST'])
+def add_machine():
+ """add machine by tinycore.
+
+ Supported fields: [
+ 'tag', 'location', 'ipmi_credentials',
+ 'machine_attributes'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ machine_api.add_machine(**data)
+ )
+
+
+@app.route("/machines/<int:machine_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_machine(machine_id):
+ """Get machine."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ machine_api.get_machine(
+ machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/machines/<int:machine_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_machine(machine_id):
+ """update machine.
+
+ Supported fields: [
+ 'tag', 'location', 'ipmi_credentials',
+ 'machine_attributes'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ machine_api.update_machine(
+ machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/machines/<int:machine_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_machine(machine_id):
+ """patch machine.
+
+ Supported fields: [
+ 'tag', 'location', 'ipmi_credentials',
+ 'machine_attributes'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ machine_api.patch_machine(
+ machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/machines/<int:machine_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_machine(machine_id):
+ """Delete machine."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ machine_api.del_machine(
+ machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/subnets", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_subnets():
+ """List subnets.
+
+ Supported filters: [
+ 'subnet', 'name'
+ ]
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ network_api.list_subnets(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/subnets/<int:subnet_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_subnet(subnet_id):
+ """Get subnet."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ network_api.get_subnet(
+ subnet_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/subnets", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_subnet():
+ """add subnet.
+
+ Must fields: ['subnet']
+ Optional fields: ['name']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ network_api.add_subnet(user=current_user, **data)
+ )
+
+
+@app.route("/subnets/<int:subnet_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_subnet(subnet_id):
+ """update subnet.
+
+ Support fields: ['subnet', 'name']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ network_api.update_subnet(
+ subnet_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/subnets/<int:subnet_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_subnet(subnet_id):
+ """Delete subnet."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ network_api.del_subnet(
+ subnet_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/adapters", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_adapters():
+ """List adapters.
+
+ Supported filters: [
+ 'name'
+ ]
+ """
+ data = _get_request_args()
+ _filter_general(data, 'name')
+ return utils.make_json_response(
+ 200,
+ adapter_api.list_adapters(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/adapters/<adapter_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_adapter(adapter_id):
+ """Get adapter."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ adapter_api.get_adapter(
+ adapter_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/adapters/<adapter_id>/metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_adapter_metadata(adapter_id):
+ """Get adapter metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_package_metadata(
+ adapter_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/oses/<os_id>/metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_os_metadata(os_id):
+ """Get os metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_os_metadata(
+ os_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/oses/<os_id>/ui_metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def convert_os_metadata(os_id):
+ """Convert os metadata to ui os metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_os_ui_metadata(
+ os_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/flavors/<flavor_id>/metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_flavor_metadata(flavor_id):
+ """Get flavor metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_flavor_metadata(
+ flavor_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/flavors/<flavor_id>/ui_metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def convert_flavor_metadata(flavor_id):
+ """Convert flavor metadata to ui flavor metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_flavor_ui_metadata(
+ flavor_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/adapters/<adapter_id>/oses/<os_id>/metadata",
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_adapter_os_metadata(adapter_id, os_id):
+ """Get adapter metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_package_os_metadata(
+ adapter_id, os_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_clusters():
+ """List clusters.
+
+ Supported filters: [
+ 'name', 'os_name', 'owner', 'adapter_name', 'flavor_name'
+ ]
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.list_clusters(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster(cluster_id):
+ """Get cluster."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_cluster():
+ """add cluster.
+
+ Must fields: ['name', 'adapter_id', 'os_id']
+ Optional fields: ['flavor_id']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.add_cluster(user=current_user, **data)
+ )
+
+
+@app.route("/clusters/<int:cluster_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster(cluster_id):
+ """update cluster.
+
+ Supported fields: ['name', 'reinstall_distributed_system']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_cluster(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_cluster(cluster_id):
+ """Delete cluster."""
+ data = _get_request_data()
+ response = cluster_api.del_cluster(
+ cluster_id, user=current_user, **data
+ )
+ if 'status' in response:
+ return utils.make_json_response(
+ 202, response
+ )
+ else:
+ return utils.make_json_response(
+ 200, response
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/config", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_config(cluster_id):
+ """Get cluster config."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_config(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_metadata(cluster_id):
+ """Get cluster metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_metadata(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/config", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster_config(cluster_id):
+ """update cluster config.
+
+ Supported fields: ['os_config', 'package_config', 'config_step']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_cluster_config(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/config", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_cluster_config(cluster_id):
+ """patch cluster config.
+
+ Supported fields: ['os_config', 'package_config', 'config_step']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_cluster_config(cluster_id, user=current_user, **data)
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/config", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_cluster_config(cluster_id):
+ """Delete cluster config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.del_cluster_config(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_cluster_action(cluster_id):
+ """take cluster action.
+
+ Supported actions: [
+ 'add_hosts', 'remove_hosts', 'set_hosts',
+ 'review', 'deploy', 'redeploy', 'check_health', 'apply_patch'
+ ]
+ """
+ data = _get_request_data()
+ url_root = request.url_root
+
+ update_cluster_hosts_func = _wrap_response(
+ functools.partial(
+ cluster_api.update_cluster_hosts, cluster_id, user=current_user,
+ ),
+ 200
+ )
+ review_cluster_func = _wrap_response(
+ functools.partial(
+ cluster_api.review_cluster, cluster_id, user=current_user,
+ ),
+ 200
+ )
+ deploy_cluster_func = _wrap_response(
+ functools.partial(
+ cluster_api.deploy_cluster, cluster_id, user=current_user,
+ ),
+ 202
+ )
+ redeploy_cluster_func = _wrap_response(
+ functools.partial(
+ cluster_api.redeploy_cluster, cluster_id, user=current_user,
+ ),
+ 202
+ )
+ patch_cluster_func = _wrap_response(
+ functools.partial(
+ cluster_api.patch_cluster, cluster_id, user=current_user,
+ ),
+ 202
+ )
+ check_cluster_health_func = _wrap_response(
+ functools.partial(
+ health_report_api.start_check_cluster_health,
+ cluster_id,
+ '%s/clusters/%s/healthreports' % (url_root, cluster_id),
+ user=current_user
+ ),
+ 202
+ )
+ return _group_data_action(
+ data,
+ add_hosts=update_cluster_hosts_func,
+ set_hosts=update_cluster_hosts_func,
+ remove_hosts=update_cluster_hosts_func,
+ review=review_cluster_func,
+ deploy=deploy_cluster_func,
+ redeploy=redeploy_cluster_func,
+ apply_patch=patch_cluster_func,
+ check_health=check_cluster_health_func
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/state", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def get_cluster_state(cluster_id):
+ """Get cluster state."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_state(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports", methods=['POST'])
+def create_health_reports(cluster_id):
+ """Create a health check report.
+
+ Must fields: ['name']
+ Optional fields: [
+ 'display_name', 'report', 'category', 'state', 'error_message'
+ ]
+ If 'report_list' is set in the data, its value is a list of such
+ reports; each is added separately and failures are logged and skipped.
+ """
+ data = _get_request_data()
+ output = []
+ logging.info('create_health_reports for cluster %s: %s',
+ cluster_id, data)
+ if 'report_list' in data:
+ for report in data['report_list']:
+ try:
+ output.append(
+ health_report_api.add_report_record(
+ cluster_id, **report
+ )
+ )
+ except Exception as error:
+ logging.exception(error)
+ continue
+
+ else:
+ output = health_report_api.add_report_record(
+ cluster_id, **data
+ )
+
+ return utils.make_json_response(
+ 200,
+ output
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports", methods=['PUT'])
+def bulk_update_reports(cluster_id):
+ """Bulk update reports.
+
+ The request data is a list of health reports.
+ Each health report must contain ['name'],
+ may contain [
+ 'display_name', 'report', 'category', 'state', 'error_message'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ health_report_api.update_multi_reports(
+ cluster_id, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_health_reports(cluster_id):
+ """list health report for a cluster."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ health_report_api.list_health_reports(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports/<name>", methods=['PUT'])
+def update_health_report(cluster_id, name):
+ """Update cluster health report.
+
+ Supported fields: ['report', 'state', 'error_message']
+ """
+ data = _get_request_data()
+ if 'error_message' not in data:
+ data['error_message'] = ""
+
+ return utils.make_json_response(
+ 200,
+ health_report_api.update_report(
+ cluster_id, name, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports/<name>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def get_health_report(cluster_id, name):
+ """Get health report by cluster id and name."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ health_report_api.get_health_report(
+ cluster_id, name, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_cluster_hosts(cluster_id):
+ """Get cluster hosts."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(cluster_api.list_cluster_hosts(
+ cluster_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/clusterhosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_clusterhosts():
+ """Get cluster hosts."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(cluster_api.list_clusterhosts(
+ user=current_user, **data
+ ))
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/hosts/<int:host_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_host(cluster_id, host_id):
+ """Get clusterhost."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(cluster_api.get_cluster_host(
+ cluster_id, host_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_clusterhost(clusterhost_id):
+ """Get clusterhost."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(cluster_api.get_clusterhost(
+ clusterhost_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/hosts", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_cluster_host(cluster_id):
+ """update cluster hosts.
+
+ Must fields: ['machine_id']
+ Optional fields: ['name', 'reinstall_os', 'roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.add_cluster_host(cluster_id, user=current_user, **data)
+ )
+
+
+@app.route(
+ '/clusters/<int:cluster_id>/hosts/<int:host_id>',
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster_host(cluster_id, host_id):
+ """Update cluster host.
+
+ Supported fields: ['name', 'reinstall_os', 'roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_cluster_host(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/clusterhosts/<int:clusterhost_id>',
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_clusterhost(clusterhost_id):
+ """Update cluster host.
+
+ Supported fields: ['name', 'reinstall_os', 'roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/clusters/<int:cluster_id>/hosts/<int:host_id>',
+ methods=['PATCH']
+)
+@log_user_action
+@login_required
+@update_user_token
+def patch_cluster_host(cluster_id, host_id):
+ """Update cluster host.
+
+ Supported fields: ['roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_cluster_host(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/clusterhosts/<int:clusterhost_id>',
+ methods=['PATCH']
+)
+@log_user_action
+@login_required
+@update_user_token
+def patch_clusterhost(clusterhost_id):
+ """Update cluster host.
+
+ Supported fields: ['roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_clusterhost(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/clusters/<int:cluster_id>/hosts/<int:host_id>',
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_cluster_host(cluster_id, host_id):
+ """Delete cluster host."""
+ data = _get_request_data()
+ response = cluster_api.del_cluster_host(
+ cluster_id, host_id, user=current_user, **data
+ )
+ if 'status' in response:
+ return utils.make_json_response(
+ 202, response
+ )
+ else:
+ return utils.make_json_response(
+ 200, response
+ )
+
+
+@app.route(
+ '/clusterhosts/<int:clusterhost_id>',
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_clusterhost(clusterhost_id):
+ """Delete cluster host."""
+ data = _get_request_data()
+ response = cluster_api.del_clusterhost(
+ clusterhost_id, user=current_user, **data
+ )
+ if 'status' in response:
+ return utils.make_json_response(
+ 202, response
+ )
+ else:
+ return utils.make_json_response(
+ 200, response
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/config",
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_host_config(cluster_id, host_id):
+ """Get clusterhost config."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_host_config(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>/config", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_clusterhost_config(clusterhost_id):
+ """Get clusterhost config."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_clusterhost_config(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/config",
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster_host_config(cluster_id, host_id):
+ """update clusterhost config.
+
+ Supported fields: ['os_config', 'package_config']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_cluster_host_config(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>/config", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_clusterhost_config(clusterhost_id):
+ """update clusterhost config.
+
+ Supported fields: ['os_config', 'package_config']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_config(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/config",
+ methods=['PATCH']
+)
+@log_user_action
+@login_required
+@update_user_token
+def patch_cluster_host_config(cluster_id, host_id):
+ """patch clusterhost config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_cluster_host_config(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_clusterhost_config(clusterhost_id):
+ """patch clusterhost config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_clusterhost_config(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/config",
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_cluster_host_config(cluster_id, host_id):
+ """Delete clusterhost config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.del_clusterhost_config(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>/config", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_clusterhost_config(clusterhost_id):
+ """Delete clusterhost config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.del_clusterhost_config(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/state",
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_host_state(cluster_id, host_id):
+ """Get clusterhost state."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_host_state(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>/state", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_clusterhost_state(clusterhost_id):
+ """Get clusterhost state."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_clusterhost_state(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/state",
+ methods=['PUT', 'POST']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster_host_state(cluster_id, host_id):
+ """update clusterhost state.
+
+ Supported fields: ['state', 'percentage', 'message', 'severity']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_state(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@util.deprecated
+@app.route(
+ "/clusters/<clustername>/hosts/<hostname>/state_internal",
+ methods=['PUT', 'POST']
+)
+def update_cluster_host_state_internal(clustername, hostname):
+ """update clusterhost state.
+
+ Supported fields: ['ready']
+ """
+ # TODO(xicheng): it should be merged into update_cluster_host_state.
+ # TODO(xicheng): the api is not login required and no user checking.
+ data = _get_request_data()
+ clusters = cluster_api.list_clusters(name=clustername)
+ if not clusters:
+ raise exception_handler.ItemNotFound(
+ 'no clusters found for clustername %s' % clustername
+ )
+ cluster_id = clusters[0]['id']
+ hosts = host_api.list_hosts(name=hostname)
+ if not hosts:
+ raise exception_handler.ItemNotFound(
+ 'no hosts found for hostname %s' % hostname
+ )
+ host_id = hosts[0]['id']
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_state_internal(
+ cluster_id, host_id, **data
+ )
+ )
+
+
+@app.route(
+ "/clusterhosts/<int:clusterhost_id>/state",
+ methods=['PUT', 'POST']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_clusterhost_state(clusterhost_id):
+ """update clusterhost state.
+
+ Supported fields: ['state', 'percentage', 'message', 'severity']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_state(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@util.deprecated
+@app.route(
+ "/clusterhosts/<clusterhost_name>/state_internal",
+ methods=['PUT', 'POST']
+)
+def update_clusterhost_state_internal(clusterhost_name):
+ """update clusterhost state.
+
+ Supported fields: ['ready']
+ """
+ data = _get_request_data()
+ clusterhosts = cluster_api.list_clusterhosts()
+ clusterhost_id = None
+ for clusterhost in clusterhosts:
+ if clusterhost['name'] == clusterhost_name:
+ clusterhost_id = clusterhost['clusterhost_id']
+ break
+ if not clusterhost_id:
+ raise exception_handler.ItemNotFound(
+ 'no clusterhost found for clusterhost_name %s' % (
+ clusterhost_name
+ )
+ )
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_state_internal(
+ clusterhost_id, **data
+ )
+ )
+
+
+@app.route("/hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_hosts():
+ """List hosts.
+
+ Supported fields: ['name', 'os_name', 'owner', 'mac']
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(host_api.list_hosts(
+ user=current_user, **data
+ ))
+ )
+
+
+@app.route("/hosts/<int:host_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_host(host_id):
+ """Get host."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(host_api.get_host(
+ host_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/machines-hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_machines_or_hosts():
+ """Get list of machine of host if the host exists.
+
+ Supported filters: [
+ 'mac', 'tag', 'location', 'os_name', 'os_id'
+ ]
+ """
+ data = _get_request_args(os_id=_int_converter)
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ _filter_general(data, 'os_name')
+ _filter_general(data, 'os_id')
+ return utils.make_json_response(
+ 200,
+ _reformat_host(host_api.list_machines_or_hosts(
+ user=current_user, **data
+ ))
+ )
+
+
+@app.route("/machines-hosts/<int:host_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_machine_or_host(host_id):
+ """Get host."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(host_api.get_machine_or_host(
+ host_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/hosts/<int:host_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_host(host_id):
+ """update host.
+
+ Supported fields: ['name', 'reinstall_os']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_host(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_hosts():
+ """update hosts.
+
+ Update a list of hosts; each dict may contain the following keys: [
+ 'name', 'reinstall_os'
+ ]
+ """
+ data = _get_request_data_as_list()
+ return utils.make_json_response(
+ 200,
+ host_api.update_hosts(
+ data, user=current_user,
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_host(host_id):
+ """Delete host."""
+ data = _get_request_data()
+ response = host_api.del_host(
+ host_id, user=current_user, **data
+ )
+ if 'status' in response:
+ return utils.make_json_response(
+ 202, response
+ )
+ else:
+ return utils.make_json_response(
+ 200, response
+ )
+
+
+@app.route("/hosts/<int:host_id>/clusters", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def get_host_clusters(host_id):
+ """Get host clusters."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_host_clusters(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/config", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_host_config(host_id):
+ """Get host config."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_host_config(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/config", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_host_config(host_id):
+ """update host config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_host_config(host_id, user=current_user, **data)
+ )
+
+
+@app.route("/hosts/<int:host_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_host_config(host_id):
+ """patch host config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.patch_host_config(host_id, user=current_user, **data)
+ )
+
+
+@app.route("/hosts/<int:host_id>/config", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_host_config(host_id):
+ """Delete host config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.del_host_config(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/networks", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_host_networks(host_id):
+ """list host networks.
+
+ Supported filters: [
+ 'interface', 'ip', 'is_mgmt', 'is_promiscuous'
+ ]
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host_networks(
+ host_api.list_host_networks(
+ host_id, user=current_user, **data
+ )
+ )
+ )
+
+
+@app.route("/host/networks", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_hostnetworks():
+ """list host networks.
+
+ Supported filters: [
+ 'interface', 'ip', 'is_mgmt', 'is_promiscuous'
+ ]
+ """
+ data = _get_request_args(
+ is_mgmt=_bool_converter,
+ is_promiscuous=_bool_converter
+ )
+ return utils.make_json_response(
+ 200,
+ _reformat_host_networks(
+ host_api.list_hostnetworks(user=current_user, **data)
+ )
+ )
+
+
+@app.route(
+ "/hosts/<int:host_id>/networks/<int:host_network_id>",
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_host_network(host_id, host_network_id):
+ """Get host network."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_host_network(
+ host_id, host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/host/networks/<int:host_network_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_hostnetwork(host_network_id):
+ """Get host network."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_hostnetwork(
+ host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/networks", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_host_network(host_id):
+ """add host network.
+
+ Required fields: ['interface', 'ip', 'subnet_id']
+ Optional fields: ['is_mgmt', 'is_promiscuous']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200, host_api.add_host_network(host_id, user=current_user, **data)
+ )
+
+
+@app.route("/hosts/networks", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_host_networks():
+ """add host networks.
+
+ Add a list of host networks; each dict may contain: [
+ 'interface', 'ip', 'subnet_id', 'is_mgmt', 'is_promiscuous'
+ ]
+ """
+ data = _get_request_data_as_list()
+ return utils.make_json_response(
+ 200, host_api.add_host_networks(
+ data=data, user=current_user,)
+ )
+
+
+@app.route(
+ "/hosts/<int:host_id>/networks/<int:host_network_id>",
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_host_network(host_id, host_network_id):
+ """update host network.
+
+ supported fields: [
+ 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
+ 'is_promiscuous'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_host_network(
+ host_id, host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/host-networks/<int:host_network_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_hostnetwork(host_network_id):
+ """update host network.
+
+ supported fields: [
+ 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
+ 'is_promiscuous'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_hostnetwork(
+ host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/hosts/<int:host_id>/networks/<int:host_network_id>",
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_host_network(host_id, host_network_id):
+ """Delete host network."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.del_host_network(
+ host_id, host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/host-networks/<int:host_network_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_hostnetwork(host_network_id):
+ """Delete host network."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.del_hostnetwork(
+ host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/state", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_host_state(host_id):
+ """Get host state."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_host_state(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/state", methods=['PUT', 'POST'])
+@log_user_action
+@login_required
+@update_user_token
+def update_host_state(host_id):
+ """update host state.
+
+ Supported fields: [
+ 'state', 'percentage', 'message', 'severity'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_host_state(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<hostname>/state_internal", methods=['PUT', 'POST'])
+def update_host_state_internal(hostname):
+ """update host state.
+
+ Supported fields: ['ready']
+ """
+ data = _get_request_data()
+ hosts = host_api.list_hosts(name=hostname)
+ if not hosts:
+ raise exception_handler.ItemNotFound(
+ 'no hosts found for hostname %s' % hostname
+ )
+ host_id = hosts[0]['id']
+ return utils.make_json_response(
+ 200,
+ host_api.update_host_state_internal(
+ host_id, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_host_action(host_id):
+ """take host action.
+
+ Supported actions: [
+ 'poweron', 'poweroff', 'reset'
+ ]
+ """
+ data = _get_request_data()
+ poweron_func = _wrap_response(
+ functools.partial(
+ host_api.poweron_host, host_id, user=current_user,
+ ),
+ 202
+ )
+ poweroff_func = _wrap_response(
+ functools.partial(
+ host_api.poweroff_host, host_id, user=current_user,
+ ),
+ 202
+ )
+ reset_func = _wrap_response(
+ functools.partial(
+ host_api.reset_host, host_id, user=current_user,
+ )
+ )
+ return _group_data_action(
+ data,
+ poweron=poweron_func,
+ poweroff=poweroff_func,
+ reset=reset_func,
+ )
+
+
+def _get_headers(*keys):
+ """Get proxied request headers."""
+ headers = {}
+ for key in keys:
+ if key in request.headers:
+ headers[key] = request.headers[key]
+ return headers
+
+
+def _get_response_json(response):
+ """Get proxies request json formatted response."""
+ try:
+ return response.json()
+ except ValueError:
+ return response.text
+
+
+@app.route("/proxy/<path:url>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_get(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding', app.config['AUTH_HEADER_NAME'],
+ 'Cookie'
+ )
+ response = requests.get(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ params=_get_request_args(),
+ headers=headers,
+ stream=True
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
+
+
+@app.route("/proxy/<path:url>", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_post(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding',
+ 'Cookie'
+ )
+ response = requests.post(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ data=request.data,
+ headers=headers
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
+
+
+@app.route("/proxy/<path:url>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_put(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding',
+ 'Cookie'
+ )
+ response = requests.put(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ data=request.data,
+ headers=headers
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
+
+
+@app.route("/proxy/<path:url>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_patch(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding',
+ 'Cookie'
+ )
+ response = requests.patch(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ data=request.data,
+ headers=headers
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
+
+
+@app.route("/proxy/<path:url>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_delete(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding',
+ 'Cookie'
+ )
+ response = requests.delete(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ headers=headers
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
+
+
+def init():
+ logging.info('init flask')
+ database.init()
+ adapter_api.load_adapters()
+ metadata_api.load_metadatas()
+ adapter_api.load_flavors()
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ init()
+ app.run(host='0.0.0.0')
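A minimal client sketch for the API above, assuming the service started by app.run() is reachable at http://127.0.0.1/api (the RAML below uses an /api prefix on its baseUri) and that the auth header is named X-Auth-Header as in the RAML examples; the credentials are placeholders:

    import requests

    BASE = 'http://127.0.0.1/api'  # assumed deployment URL

    # Obtain a token (POST /users/token, documented in api.raml below).
    resp = requests.post(
        '%s/users/token' % BASE,
        json={'email': 'admin@huawei.com', 'password': 'admin'})
    resp.raise_for_status()
    token = resp.json()['token']

    # Call a login-protected endpoint, e.g. list_hosts with a filter.
    hosts = requests.get(
        '%s/hosts' % BASE,
        headers={'X-Auth-Header': token},
        params={'os_name': 'CentOS-6.5-x86_64'})
    print(hosts.json())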
diff --git a/compass-deck/api/api.raml b/compass-deck/api/api.raml
new file mode 100644
index 0000000..6855b57
--- /dev/null
+++ b/compass-deck/api/api.raml
@@ -0,0 +1,4027 @@
+#%RAML 0.8
+title: Compass
+version: v1
+baseUri: http://10.145.89.151/api
+mediaType: application/json
+
+
+/permissions:
+ get:
+ body:
+ application/json:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "alias": "list permissions",
+ "description": "list all permissions",
+ "id": 1,
+ "name": "list_permissions"
+ },
+ ]
+ description: List all permissions
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{permission_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "alias": "list permissions",
+ "description": "list all permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ message: "Cannot find the record in table Permission: {'id': '<permission_id>'}"
+ }
+ description: List a specific permission info
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/users:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "id": 1,
+ "email": "someuser@email.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true,
+ "created_at": "--timestamp---",
+ "last_login_at": "--timestamp---"
+ },
+ ]
+
+ description: Lists information for all users
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ queryParameters:
+ email:
+ is_admin:
+ active:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "email": "admin@someemail.com",
+ "password": "admin",
+ "firstname": "First",
+ "lastname": "Last"
+ }
+ responses:
+ 201:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "email": "user3@someemail.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true,
+ "created_at": "--timestamp---",
+ "last_login_at": "--timestamp---"
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+ {
+ "bad request"
+ }
+ 403:
+ body:
+ application/json:
+ example: |
+ {
+ "forbidden"
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The user already exists!"
+ }
+ description: Creates a user (admin only)
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{user_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 1,
+ "email": "someuser@email.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true,
+ "created_at": "2014-03-25 12:00:00",
+ "last_login_at": "2014-03-25 12:05:00"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The user with id 'some--id--' cannot be found!"
+ }
+ description: Lists information for a specific user
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {"password": 123}
+ responses:
+ 201:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "email": "user3@someemail.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The user with id 'some--id--' cannot be found!"
+ }
+ description: Updates a user's information
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "email": "user3@someemail.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The user cannot be found!"
+ }
+ description: Deletes a user (admin only)
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /permissions:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "user_id": 1,
+ "description": "list all permissions",
+ "permission_id": 1,
+ "updated_at": "2014-10-17 16:28:21",
+ "alias": "list permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "itemNotFound",
+ "message": "The user with id 'some--id--' cannot be found!"
+ }
+ description: Lists permissions for a specified user
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "add_permissions": [1,2,3],
+ "remove_permissions": [1],
+ "set_permissions": [1],
+ "disable_user": [1],
+ "enable_user": [1]
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ Add permission:
+
+ [
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "user_id": 1,
+ "description": "list all permissions",
+ "permission_id": 1,
+ "updated_at": "2014-10-17 16:28:21",
+ "alias": "list permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+
+ Remove permission:
+
+ [
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "user_id": 1,
+ "description": "list all permissions",
+ "permission_id": 1,
+ "updated_at": "2014-10-17 16:28:21",
+ "alias": "list permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+
+ Set Permission:
+
+ [
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "user_id": 1,
+ "description": "list all permissions",
+ "permission_id": 1,
+ "updated_at": "2014-10-17 16:28:21",
+ "alias": "list permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+
+ Enable user:
+
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "updated_at": "2014-10-17 16:28:21",
+ "email": "admin@huawei.com",
+ "is_admin": true,
+ "active": true,
+ "id": 1
+ }
+
+ Disable user:
+
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "updated_at": "2014-10-17 16:28:21",
+ "email": "admin@huawei.com",
+ "is_admin": true,
+ "active": true,
+ "id": 1
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "itemNotFound",
+ "message": "The user cannot be found!"
+ }
+ description: Adds/removes permissions and enables/disables a user (admin only)
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /token:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "email": "admin@huawei.com",
+ "password": "admin"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "expire_timestamp": "2014-10-06 13:25:23",
+ "token": "$1$c1ZWGYEn$WTg57cnP4pEwd9JMJ7beS/",
+ "user_id": 1,
+ "id": 3
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "unauthorized",
+ "message": "Either email or password is wrong!"
+ }
+ description: Authenticates and generates a token
+ /login:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "email": "admin@huawei.com",
+ "password": "admin"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "expire_timestamp": "2014-10-06 13:25:23",
+ "token": "$1$c1ZWGYEn$WTg57cnP4pEwd9JMJ7beS/",
+ "user_id": 1,
+ "id": 3
+ }
+ 401:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "unauthorized",
+ "message": "Either email or password is wrong!"
+ }
+ 403:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "userDisabled",
+ "message”: "User is disabled !"
+ }
+ description: Login
+ /logout:
+ post:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "expire_timestamp": "2014-10-17 18:30:29",
+ "token": "$1$AFqIS5Kn$1ASgOkPv.G1a7pkRRHKY.0",
+ "user_id": 1,
+ "id": 1
+ }
+ ]
+ 401:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "invalid user token: $1$AFqIS5Kn$1ASgOkPv.G1a7pkRRHKY.0",
+ }
+ description: Logout
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/switches:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 2
+ }
+ ]
+ description: Lists switches
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "ip": "172.29.8.40",
+ "credentials":
+ {
+ "version": "2c",
+ "community": "public"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 2
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "IP address '192.168.1.1' already exists"
+ }
+ description: Creates a switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{switch_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the switch which id is '1'."
+ }
+ description: Lists a switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "ip": "172.29.8.40",
+ "credentials":
+ {
+ "version": "2c",
+ "community": "private"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "private"
+ },
+ "id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot update the switch which id is '1'! The switch does not exists."
+ }
+ description: Set the switch properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "ip": "172.29.8.40",
+ "credentials":
+ {
+ "version": "3",
+ "community": "public"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "3",
+ "community": "public"
+ },
+ "id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot update the switch which id is '1'! The switch does not exists."
+ }
+ description: Updates the switch properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.41",
+ "created_at": "2014-10-17 17:45:17",
+ "updated_at": "2014-10-17 17:45:17",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 3
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table Switch: {'id': 4}"
+ }
+ description: Delete switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /machines:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "vlans": [],
+ "updated_at": "2014-10-17 18:02:21",
+ "created_at": "2014-10-17 18:02:21",
+ "switch_id": 3,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.41",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "10",
+ "switch_machine_id": 204
+ }
+ ]
+ queryParameters:
+ port:
+ portStart:
+ portEnd:
+ portRange:
+ portPrefix:
+ portSuffix:
+ vlans:
+ mac:
+ tag:
+ location:
+ description: Lists machines for a specified switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "mac": "28:6e:d4:46:c4:25",
+ "port": "1",
+ "vlans": "88",
+ "ipmi_credentials": {
+ "ip": "1.2.3.4",
+ "username": "test",
+ "password": "test"
+ },
+ "tag": "tag",
+ "location": {
+ "column": "1",
+ "row": "1",
+ "unit": "1"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 1,
+ "mac": "28:6e:d4:47:c8:6c",
+ "vlan": 1,
+ "port": "10"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The switch does not exists."
+ }
+ description: Manually add a machine
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /machines:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "mac": "28:6e:d4:46:c4:25",
+ "port": "1",
+ "vlans": "88"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "duplicate_switches_machines": [
+ {
+ "mac": "a1:b2:c3:d4:e1:f6",
+ "port": "101"
+ }
+ ],
+ "switches_machines": [
+ {
+ "vlans": [],
+ "updated_at": "2015-05-07 10:55:12",
+ "created_at": "2015-05-07 10:55:12",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "70:7b:e8:e2:72:21",
+ "tag": {},
+ "location": {},
+ "switch_ip": "10.145.8.10",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "204",
+ "switch_machine_id": 1
+ },
+ {
+ "vlans": [],
+ "updated_at": "2015-05-07 10:55:12",
+ "created_at": "2015-05-07 10:55:12",
+ "switch_id": 2,
+ "id": 2,
+ "mac": "a1:b2:c3:d4:e1:f6",
+ "tag": {},
+ "location": {},
+ "switch_ip": "10.145.8.10",
+ "ipmi_credentials": {},
+ "machine_id": 2,
+ "port": "101",
+ "switch_machine_id": 2
+ },
+ {
+ "vlans": [],
+ "updated_at": "2015-05-07 10:55:12",
+ "created_at": "2015-05-07 10:55:12",
+ "switch_id": 3,
+ "id": 3,
+ "mac": "a1:b2:c3:d4:e5:f9",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 3,
+ "port": "121",
+ "switch_machine_id": 3
+ }
+ ],
+ "fail_switches_machines": [
+ {
+ "mac": "a1:b5:c3:d4:e5:f9",
+ "port": "131"
+ },
+ {
+ "mac": "a1:b2:c3:d4:e1:f6",
+ "port": "13"
+ }
+ ]
+ }
+ description: Batch switch machines. If a machine is connected to another switch or its switch does not exist, it is added to fail_switches_machines and returned. If the machine already exists, it is added to duplicate_switches_machines.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+
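The three result lists above partition the request; a client sketch that replays the failure buckets, assuming the batch endpoint is mounted at /switches/machines (the nesting above is ambiguous) and reusing the token/header conventions from the earlier sketch:

    import requests

    BASE = 'http://127.0.0.1/api'           # assumed deployment URL
    HEADERS = {'X-Auth-Header': '<token>'}  # token from /users/token

    payload = [  # hypothetical machines
        {'mac': '70:7b:e8:e2:72:21', 'port': '204'},
        {'mac': 'a1:b2:c3:d4:e1:f6', 'port': '101'},
    ]
    result = requests.post(
        '%s/switches/machines' % BASE, json=payload, headers=HEADERS).json()
    for m in result.get('duplicate_switches_machines', []):
        print('already known, skipped: %(mac)s port %(port)s' % m)
    for m in result.get('fail_switches_machines', []):
        print('rejected (wrong or unknown switch): %(mac)s port %(port)s' % m)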
+ /{id}/machines/{machine_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "vlans": [
+ 88
+ ],
+ "updated_at": "2014-10-17 17:40:13",
+ "created_at": "2014-10-17 17:40:13",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "7",
+ "switch_machine_id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table SwitchMachine: {'machine_id': 1000, 'switch_id': 2}"
+ }
+ description: Get machine of a specified switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "port": "80",
+ "vlans": "88",
+ "pmi_credentials": "pmi_credentials here",
+ "tag": "tag here",
+ "location":
+ {"building": "E5"}
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "vlans": [
+ 88
+ ],
+ "updated_at": "2014-10-17 17:40:13",
+ "created_at": "2014-10-17 17:40:13",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5"
+ },
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "7",
+ "switch_machine_id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table SwitchMachine: {'machine_id': 1000, 'switch_id': 2}"
+ }
+ description: set machine properties of a specified switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "port": "80",
+ "vlans": "88",
+ "pmi_credentials": "pmi_credentials here",
+ "tag": "tag here",
+ "location":
+ {"city": "Beijing"}
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "vlans": [
+ 88
+ ],
+ "updated_at": "2014-10-17 17:40:13",
+ "created_at": "2014-10-17 17:40:13",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "7",
+ "switch_machine_id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table SwitchMachine: {'machine_id': 1000, 'switch_id': 2}"
+ }
+ description: update machine properties of a specified switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "vlans": [
+ 88
+ ],
+ "updated_at": "2014-10-17 17:40:13",
+ "created_at": "2014-10-17 17:40:13",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "7",
+ "switch_machine_id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table SwitchMachine: {'machine_id': 1000, 'switch_id': 2}"
+ }
+ description: Delete a machine from a switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{switch_id}/action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "find_machines": 1,
+ "add_macheins": [{"machine_id":1,"port":"10"}],
+ "rermove_machines": 1,
+ "set_machines": [{"machine_id": 1, "port": "10"}]
+ }
+ responses:
+ 202:
+ body:
+ application/json:
+ example: |
+ find_machines:
+ {
+ "status": "action {'find_machines': None} sent",
+ "details": {}
+ }
+ 200:
+ body:
+ application/json:
+ example: |
+ add_machines:
+ [
+ {
+ "vlans": [],
+ "updated_at": "2014-10-17 17:56:44",
+ "created_at": "2014-10-17 17:56:44",
+ "switch_id": 3,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.41",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "10",
+ "switch_machine_id": 203
+ }
+ ]
+
+ remove_machines:
+ []
+ set_machines:
+ [
+ {
+ "vlans": [],
+ "updated_at": "2014-10-17 17:56:44",
+ "created_at": "2014-10-17 17:56:44",
+ "switch_id": 3,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.41",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "10",
+ "switch_machine_id": 203
+ }
+ ]
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot update the switch which id is '1'! The switch does not exists."
+ }
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
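find_machines answers 202 before the switch poll finishes, so callers poll the machine list afterwards; a sketch under the same base-URL/token assumptions as the earlier examples:

    import time
    import requests

    BASE = 'http://127.0.0.1/api'           # assumed deployment URL
    HEADERS = {'X-Auth-Header': '<token>'}  # token from /users/token
    switch_id = 2                           # hypothetical switch

    # Trigger the asynchronous scan of the switch.
    resp = requests.post(
        '%s/switches/%d/action' % (BASE, switch_id),
        json={'find_machines': 1}, headers=HEADERS)
    assert resp.status_code == 202  # accepted, not yet finished

    # No completion callback is exposed here, so poll the machine list.
    for _ in range(10):
        machines = requests.get(
            '%s/switches/%d/machines' % (BASE, switch_id),
            headers=HEADERS).json()
        if machines:
            break
        time.sleep(5)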
+/switchbatch:
+ post:
+ body:
+ application/json:
+ schema: |
+ [{
+ "switch_ip": "127.0.0.1",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ }
+ }, {
+ "switch_ip": "127.0.0.2"
+ }]
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "switches": [
+ {
+ "vendor": "Huawei",
+ "ip": "10.145.8.10",
+ "created_at": "2015-05-04 16:13:34",
+ "updated_at": "2015-05-04 16:13:34",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 2
+ },
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2015-05-04 16:13:34",
+ "updated_at": "2015-05-04 16:13:34",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {},
+ "id": 3
+ }
+ ],
+ "fail_switches": [
+ {
+ "ip": "172.29.8.40"
+ }
+ ]
+ }
+ description: Batch switches. If a switch ip already exists, the switch data is added to the fail_switches list and returned.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
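A sketch of bulk switch registration against /switchbatch, reporting the entries rejected as already registered (base URL and token assumed as before):

    import requests

    BASE = 'http://127.0.0.1/api'           # assumed deployment URL
    HEADERS = {'X-Auth-Header': '<token>'}  # token from /users/token

    switches = [
        {'switch_ip': '10.145.8.10',
         'credentials': {'version': '2c', 'community': 'public'}},
        {'switch_ip': '172.29.8.40'},  # registered without credentials
    ]
    result = requests.post(
        '%s/switchbatch' % BASE, json=switches, headers=HEADERS).json()
    for failed in result.get('fail_switches', []):
        print('ip already registered: %s' % failed['ip'])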
+/machines:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-17 23:22:53",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ },
+ ]
+ queryParameters:
+ mac:
+ tag:
+ location:
+ description: Lists machines
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{machine_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-17 23:22:53",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The machine witch ID '$machine_id' cannot be found!"
+ }
+ description: Lists information for a specific machine
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "ipmi_credentials": {
+ "builder": "huawei"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-17 23:58:46",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {
+ "builder": "huawei"
+ },
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The machine witch ID “$machine_id” cannot be found!"
+ }
+ description: set machine properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "ipmi_credentials": {
+ "builder": "huawei"
+ },
+ "tag": {
+ "type": "ES200"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-18 00:03:12",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {
+ "type": "ES200"
+ },
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The machine witch ID '$machine_id' cannot be found!"
+ }
+ description: update machine properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-18 00:03:12",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {
+ "type": "ES200"
+ },
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The machine witch ID '$machine_id' cannot be found!"
+ }
+ description: Delete a machine (admin only)
+ /action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "tag": {"builder": "huawei"},
+ "poweron": "true",
+ "poweroff": "true",
+ "reset": "true"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ tag example:
+
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-18 00:10:58",
+ "id": 2,
+ "switches": [
+ {
+ "switch_ip": "172.29.8.40",
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+ ],
+ "mac": "00:0c:29:2b:c9:d4",
+ "tag": {
+ "builder": "huawei"
+ },
+ "location": {},
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+
+ poweron / poweroff / reset example:
+
+ {
+ "status": "poweron 00:0c:29:2b:c9:d4 action sent",
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The machine witch ID '$machine_id' cannot be found!"
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The machine haven't set IPMI info!"
+ }
+ description: machine actions
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
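Power actions return 400 until ipmi_credentials are set on the machine, so a client sets them first; a sketch with hypothetical IPMI values, assuming the action route is mounted at /machines/{machine_id}/action:

    import requests

    BASE = 'http://127.0.0.1/api'           # assumed deployment URL
    HEADERS = {'X-Auth-Header': '<token>'}  # token from /users/token
    machine_id = 1                          # hypothetical machine

    # Store IPMI credentials; poweron would otherwise return 400.
    requests.put(
        '%s/machines/%d' % (BASE, machine_id),
        json={'ipmi_credentials': {
            'ip': '1.2.3.4', 'username': 'test', 'password': 'test'}},
        headers=HEADERS)

    # The action response is only an acknowledgement that it was sent.
    ack = requests.post(
        '%s/machines/%d/action' % (BASE, machine_id),
        json={'poweron': 'true'}, headers=HEADERS).json()
    print(ack.get('status'))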
+/flavors:
+ /{flavor_id}/metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "flavor_config": {
+ "neutron_config": {...},
+ "security": {...},
+ "ha_proxy": {...},
+ "network_mapping": {...}
+
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {message: "flavor <flavor_id> does not exist"}
+ description: List specific flavor metadata.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{flavor_id}/ui_metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "flavor_config": [
+ {
+ "category": "service_credentials",
+ "modifiable_data": [
+ "username",
+ "password"
+ ],
+ "table_display_header": [
+ "Service",
+ "UserName",
+ "Password",
+ "Action"
+ ],
+ "accordion_heading": "OpenStack Database and Queue Credentials",
+ "action": true,
+ "data_structure": "table"
+ },
+ {...},
+ {...}
+ ]
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {message: "flavor <flavor_id> does not exist"}
+ description: List specific flavor ui metadata.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/adapters:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [{
+ "flavors": [
+ {
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "display_name": "All-In-One",
+ "id": 4,
+ "template": "allinone.tmpl",
+ "name": "allinone"
+ },
+ ],
+ "package_installer": {
+ "id": 1,
+ "alias": "chef_installer",
+ "name": "chef_installer",
+ "settings": {
+ "chef_server_ip": "10.145.88.211",
+ "client_name": "",
+ "chef_server_dns": "compass",
+ "databags": [],
+ "chef_url": "https://10.145.88.211",
+ "key_dir": ""
+ }
+ },
+ "name": "openstack_icehouse",
+ "os_installer": {
+ "id": 1,
+ "alias": "cobbler",
+ "name": "cobbler",
+ "settings": {
+ "credentials": {
+ "username": "cobbler",
+ "password": "cobbler"
+ },
+ "cobbler_url": "http://10.145.88.211/cobbler_api"
+ }
+ },
+ "supported_oses": [
+ {
+ "os_id": 1,
+ "id": 1,
+ "name": "Ubuntu-12.04-x86_64"
+ },
+ {
+ "os_id": 2,
+ "id": 2,
+ "name": "CentOS-6.5-x86_64"
+ }
+ ],
+ "display_name": "OpenStack Icehouse",
+ "id": 3
+ }]
+ queryParameters:
+ name:
+ description: Lists information for all adapters
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "openstack",
+ "display": "OpenStack",
+ "os_installer": "cobbler",
+ "package_installer": "chef",
+ "roles": [ { "display_name": "compute",
+ "name": "os-compute-worker"
+ },
+ { "display_name": "controller",
+ "name": "os-controller"
+ },
+ { "display_name": "network",
+ "name": "os-network"
+ },
+ { "display_name": "storage",
+ "name": "os-block-storage-worker"
+ }
+ ],
+ "compatible_os": [
+ {
+ "name": "CentOs",
+ "os_id": 1
+ },
+ {
+ "name": "Ubuntu",
+ "os_id": 2
+ }
+ ]
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The adapter with id 'some_id' cannot be found!"
+ }
+ description: Lists information for a specified adapter
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /oses/{os_id}/metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config": {
+ "security": {
+ "_self": {
+ "mapping_to": "",
+ "description": null,
+ "required_in_whole_config": true,
+ "display_type": null,
+ "js_validator": null,
+ "default_value": null,
+ "field_type": "dict",
+ "name": "security",
+ "required_in_options": false,
+ "is_required": false,
+ "options": null
+ }
+ }
+ },
+ "os_config": {
+ "server_credentials": {
+ "_self": {
+ "mapping_to": "server_credentials",
+ "description": null,
+ "required_in_whole_config": true,
+ "display_type": null,
+ "js_validator": null,
+ "default_value": null,
+ "field_type": "dict",
+ "name": "server_credentials",
+ "required_in_options": false,
+ "is_required": false,
+ "options": null
+ },
+ "username": {
+ "_self": {
+ "mapping_to": "username",
+ "description": "username",
+ "required_in_whole_config": false,
+ "display_type": "text",
+ "js_validator": null,
+ "default_value": "root",
+ "field_type": "basestring",
+ "name": "username",
+ "required_in_options": false,
+ "is_required": true,
+ "options": null
+ }
+ }
+ }
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The adapter with id 'some_id' cannot be found!"
+ }
+ description: Lists config formats for a specified adapter and os
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /oses/{os_id}/ui_metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_global_config": [
+ {
+ "title": "Server Credentials",
+ "data": [
+ {
+ "default_value": "root",
+ "display_name": "User name",
+ "name": "username",
+ "display_type": "text",
+ "is_required": "true",
+ "placeholder": "Username",
+ "order": 1
+ },
+ {
+ "display_name": "Confirm Password",
+ "name": "confirmPassword",
+ "datamatch": "password",
+ "display_type": "password",
+ "is_required": "true",
+ "placeholder": "Confirm Password",
+ "order": 3
+ },
+ {
+ "display_name": "Password",
+ "name": "password",
+ "display_type": "password",
+ "is_required": "true",
+ "placeholder": "Password",
+ "order": 2
+ }],
+ "order": 2,
+ "name": "server_credentials"
+ }
+ ]
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "os <os_id> does not exist"
+ }
+ description: List specified os ui metadata.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+
+/subnets:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [{
+ "updated_at": "2014-10-18 21:24:46",
+ "subnet": "10.145.88.0/23",
+ "created_at": "2014-10-18 21:24:46",
+ "id": 1,
+ "name": "10.145.88.0/23"
+ }]
+ description: Gets information for all subnetworks
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "subnet": "10.172.20.0/24",
+ "name": "test_subnet"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "updated_at": "2014-10-18 21:24:46",
+ "subnet": "10.145.88.0/23",
+ "created_at": "2014-10-18 21:24:46",
+ "id": 1,
+ "name": "10.145.88.0/23"
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Keyword '$somekey' cannot be recognized!"
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnet already exists!"
+ }
+ description: Creates one subnetwork
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{subnet_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "updated_at": "2014-10-18 21:24:46",
+ "subnet": "10.145.88.0/23",
+ "created_at": "2014-10-18 21:24:46",
+ "id": 1,
+ "name": "10.145.88.0/23"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnetwork with id 'some_id' cannot be found!"
+ }
+ description: Gets one subnetwork info
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "subnet": "10.172.20.0/24",
+ "name": "update_subnet"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "updated_at": "2014-10-18 21:44:17",
+ "subnet": "10.145.86.0/23",
+ "created_at": "2014-10-18 21:43:50",
+ "id": 1,
+ "name": "10.145.86.0/23"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnetwork with id 'some_id' cannot be found!"
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnet name already exists!"
+ }
+ description: set subnet properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 403:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnetwork is in use by some interface. Cannot delete it."
+ }
+
+
+ {
+ "message": "Subnetwork can only be deleted by creator or admin!"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnetwork with id 'some_id' cannot be found!"
+ }
+ description: Deletes a subnetwork (owner, admin only)
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/clusters:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "created_at": "2014-10-18 23:01:23",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster1",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:01:23",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 1
+ }
+ ]
+ queryParameters:
+ name:
+ os_name:
+ owner:
+ adapter_name:
+ flavor_name:
+ description: Lists all information for all clusters
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "adapter_id": 3,
+ "name": "add_cluster",
+ "os_id": 1,
+ "flavor_id": 1
+ }
+ responses:
+ 201:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-18 23:01:23",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster1",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:01:23",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 1
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with name 'cluster_01' already exists!"
+ }
+ description: Creates a new cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
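Creating a cluster ties together an adapter, one of its supported oses, and one of its flavors; a sketch that discovers the ids from /adapters instead of hard-coding them (base URL and token assumed as before):

    import requests

    BASE = 'http://127.0.0.1/api'           # assumed deployment URL
    HEADERS = {'X-Auth-Header': '<token>'}  # token from /users/token

    # Ids differ per installation, so read them from the adapter listing.
    adapter = requests.get('%s/adapters' % BASE, headers=HEADERS).json()[0]
    cluster = requests.post(
        '%s/clusters' % BASE,
        json={'name': 'add_cluster',
              'adapter_id': adapter['id'],
              'os_id': adapter['supported_oses'][0]['os_id'],
              'flavor_id': adapter['flavors'][0]['id']},
        headers=HEADERS).json()
    print(cluster['id'])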
+ /{cluster_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-18 23:01:23",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster1",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:01:23",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Lists information for a specified cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "name": "update_cluster"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-18 23:16:02",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster_new",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:16:39",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 2
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster <cluster_id> not found"
+ }
+ description: set cluster properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-18 23:01:23",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster1",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:01:23",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 1
+ }
+ 403:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster has been deployed or is being installed. Not allowed to delete it now!"
+ }
+ description: Deletes a specific cluster before deployment (admin, owner only). Hosts are kept even after the cluster is deleted.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /config:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config": {
+ },
+ "os_config": {
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Gets config information for a specified cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config":{
+ "general": {
+ "language": "EN",
+ "timezone": "PDT",
+ "domain": "xxx",
+ "default_gateway": "10.0.0.1"
+ },
+ "server_credentials": {
+ "username": "admin",
+ "password": "admin"
+ },
+ "partition": {
+ "/var" : {
+ "_type": "$path",
+ "max_size": "20",
+ "size_percentage": "20"
+ }
+ }
+ },
+ "package_config":{
+ "network_mapping": {
+ "management": {
+ "interface": "eth0"
+ },
+ "tenant": {
+ "interface": "eth1"
+ },
+ "storage": {
+ "interface":" eth2"
+ },
+ "public": {
+ "interface": "eth3"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config”: {
+ "general”: {
+ "language": "EN",
+ "timezone": "PDT",
+ "domain": "xxx",
+ "default_gateway": "10.0.0.1"
+ },
+ "server_crendentials": {
+ "username": "admin",
+ "password": "admin"
+ },
+ "partition": {
+ "/var" : {
+ "max_size": "20",
+ "size_percentage": "20",
+ },
+ }
+ }
+
+ {
+ "package_config": {
+ "network_mapping": {
+ "management": {
+ "interface": "eth0"
+ },
+ "tenant": {
+ "interface": "eth1"
+ },
+ "storage": {
+ "interface":"eth2"
+ },
+ "public": {
+ "interface": "eth3"
+ }
+ }
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: set properties in cluster config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "package_config": {
+ "security": {
+ "dashboard_credentials": {
+ "username": "root"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config":{
+ "security": {
+ "service_crendentials": {
+ "image": {
+ "username": "admin",
+ "password": "admin"
+ },
+ ...
+ },
+ "dashboard_credentials":{
+ "username": "root",
+ "password": "admin"
+ }
+ }
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: update properties in cluster config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config":{
+ "security": {
+ "service_crendentials": {
+ "image": {
+ "username": "admin",
+ "password": "admin"
+ },
+ ...
+ }
+ }
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: delete cluster config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /state:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "cluster_id": 1,
+ "state": "INSTALLING",
+ "percentage": 0.5,
+ "severity": "INFO",
+ "message": "-----some--message-----",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: get cluster state
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /hosts:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "id" : 1,
+ "name": "host_01",
+ "dns": "xxx",
+ "os": "Centos",
+ "mac": "---MAC-address---",
+ "machine_id": 1,
+ "os_installed": true,
+ },
+ …...
+ ]
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Gets the information of the hosts belonging to this cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "machine_id": 1,
+ "name": "cluster_host",
+ "reinstall_os": "True",
+ "roles": ["allinone-compute"]
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_installer": {
+ "id": 1,
+ "alias": "cobbler",
+ "name": "cobbler",
+ "settings": {
+ "credentials": {
+ "username": "cobbler",
+ "password": "cobbler"
+ },
+ "cobbler_url": "http://10.145.88.211/cobbler_api"
+ }
+ },
+ "ip": null,
+ "clusterhost_id": 2,
+ "updated_at": "2014-10-18 23:47:47",
+ "switches": [
+ {
+ "switch_ip": "172.29.8.40",
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+ ],
+ "os_installed": false,
+ "tag": {},
+ "cluster_id": 2,
+ "id": 2,
+ "switch_ip": "172.29.8.40",
+ "networks": {
+ },
+ "hostname": null,
+ "reinstall_os": true,
+ "owner": "admin@huawei.com",
+ "port": "4",
+ "location": {},
+ "os_name": "CentOS-6.5-x86_64",
+ "reinstall_distributed_system": true,
+ "mac": "00:0c:29:2b:c9:d4",
+ "host_id": 2,
+ "distributed_system_installed": false,
+ "name": "None.cluster_new",
+ "roles": [],
+ "clustername": "cluster_new",
+ "created_at": "2014-10-18 23:47:47",
+ "machine_id": 2
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "host <host_id> already exists"
+ }
+ description: add host to a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{host_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "host_01",
+ "dns": "xxx",
+ "os": "Centos",
+ "mac": "---MAC-address---",
+ "machine_id": 1,
+ "os_installed": true,
+ "links": [
+ {
+ "href" : "/hosts/1",
+ "rel": "self"
+ },
+ {
+ "href": "/clusters/1/hosts/1/config",
+ "rel": "host package config"
+ }
+ ]
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: get host of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "name": "update_cluster_host",
+ "reinstall_os": "False",
+ "roles": ["ha-proxy"]
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_installer": {
+ "id": 1,
+ "alias": "cobbler",
+ "name": "cobbler",
+ "settings": {
+ "credentials": {
+ "username": "cobbler",
+ "password": "cobbler"
+ },
+ "cobbler_url": "http://10.145.88.211/cobbler_api"
+ }
+ },
+ "ip": null,
+ "clusterhost_id": 2,
+ "updated_at": "2014-10-19 00:10:43",
+ "switches": [
+ {
+ "switch_ip": "172.29.8.40",
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+ ],
+ "os_installed": false,
+ "tag": {},
+ "cluster_id": 2,
+ "id": 2,
+ "switch_ip": "172.29.8.40",
+ "networks": {},
+ "hostname": null,
+ "reinstall_os": true,
+ "owner": "admin@huawei.com",
+ "port": "4",
+ "location": {},
+ "os_name": "CentOS-6.5-x86_64",
+ "reinstall_distributed_system": true,
+ "mac": "00:0c:29:2b:c9:d4",
+ "host_id": 2,
+ "distributed_system_installed": false,
+ "name": "None.cluster_new",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "clustername": "cluster_new",
+ "created_at": "2014-10-18 23:47:47",
+ "machine_id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host properties of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "roles": "os-controller"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_installer": {
+ "id": 1,
+ "alias": "cobbler",
+ "name": "cobbler",
+ "settings": {
+ "credentials": {
+ "username": "cobbler",
+ "password": "cobbler"
+ },
+ "cobbler_url": "http://10.145.88.211/cobbler_api"
+ }
+ },
+ "ip": null,
+ "clusterhost_id": 2,
+ "updated_at": "2014-10-19 00:10:43",
+ "switches": [
+ {
+ "switch_ip": "172.29.8.40",
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+ ],
+ "os_installed": false,
+ "tag": {},
+ "cluster_id": 2,
+ "id": 2,
+ "switch_ip": "172.29.8.40",
+ "networks": {},
+ "hostname": null,
+ "reinstall_os": true,
+ "owner": "admin@huawei.com",
+ "port": "4",
+ "location": {},
+ "os_name": "CentOS-6.5-x86_64",
+ "reinstall_distributed_system": true,
+ "mac": "00:0c:29:2b:c9:d4",
+ "host_id": 2,
+ "distributed_system_installed": false,
+ "name": "None.cluster_new",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ },
+ {
+ "name": "new-role",
+ ...
+ }
+ ],
+ "clustername": "cluster_new",
+ "created_at": "2014-10-18 23:47:47",
+ "machine_id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: update host properties of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ description: delete host from a cluster
+ /config:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+ ...
+ },
+ "package_config": {
+ ...
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: get config of a host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config": {
+ "general": {
+ "language": "EN",
+ "timezone": "UTC",
+ "http_proxy": "http://127.0.0.1:3128",
+ "https_proxy": "http://127.0.0.1:3128",
+ "no_proxy": [
+ "127.0.0.1",
+ "compass"
+ ],
+ "ntp_server": "127.0.0.1",
+ "dns_servers": [
+ "127.0.0.1"
+ ],
+ "domain": "ods.com",
+ "search_path": [
+ "ods.com"
+ ],
+ "default_gateway": "127.0.0.1"
+ },
+ "server_credentials": {
+ "username": "root",
+ "password": "root"
+ },
+ "partition": {
+ "/var": {
+ "max_size": "100G",
+ "percentage": 10,
+ "size": "1G"
+ }
+ }
+ },
+ "package_config": {
+ "network_mapping": {
+ "management": {
+ "interface": "eth0"
+ },
+ "tenant": {
+ "interface": "eth1"
+ },
+ "storage": {
+ "interface":"eth2"
+ },
+ "public": {
+ "interface": "eth3"
+ }
+ },
+ "services_credentials": {
+ "image": {
+ "username": "xxx",
+ "password": "xxx"
+ },
+ "metering": {
+ "username": "xxx",
+ "password": "xxx"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ ...
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config": {
+ "general": {
+ "language": "EN",
+ "timezone": "UTC",
+ "http_proxy": "http://127.0.0.1:3128",
+ "https_proxy": "http://127.0.0.1:3128",
+ "no_proxy": [
+ "127.0.0.1",
+ "compass"
+ ],
+ "ntp_server": "127.0.0.1",
+ "dns_servers": [
+ "127.0.0.1"
+ ],
+ "domain": "ods.com",
+ "search_path": [
+ "ods.com"
+ ],
+ "default_gateway": "127.0.0.1"
+ },
+ "server_credentials": {
+ "username": "root",
+ "password": "root"
+ },
+ "partition": {
+ "/var": {
+ "max_size": "100G",
+ "percentage": 10,
+ "size": "1G"
+ }
+ }
+ },
+ "package_config": {
+ "network_mapping": {
+ "management": {
+ "interface": "eth0"
+ },
+ "tenant": {
+ "interface": "eth1"
+ },
+ "storage": {
+ "interface":"eth2"
+ },
+ "public": {
+ "interface": "eth3"
+ }
+ },
+ "services_credentials": {
+ "image": {
+ "username": "xxx",
+ "password": "xxx"
+ },
+ "metering": {
+ "username": "xxx",
+ "password": "xxx"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+ ...//the same as PATCH cluster config
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: update host config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+ ...//the same as PATCH cluster config
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: delete host config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /state:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "cluster_id" : 1,
+ "host_id": 10
+ "state": "INSTALLING",
+ "percentage": 0.5,
+ "severity": "INFO",
+ "message": "-----some--message-----",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: get host state of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "state": "INSTALLING"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "cluster_id" : 1,
+ "host_id": 10
+ "state": "SUCCESSFUL",
+ "percentage": 1,
+ "severity": "INFO",
+ "message": "-----some--message-----",
+ "updated_at": "---timestamp---"
+ }
+ OR
+ {
+ "cluster_id" : 1,
+ "host_id": 10
+ "state": "ERROR",
+ "percentage": 0.7,
+ "severity": "ERROR",
+ "message": "---some-error-message---",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host state properties of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "add_hosts": {
+ "machines": [{
+ "machine_id": 1,
+ "host_id": 1,
+ "reinstall_os": "True"
+ },{
+ "machine_id": 2,
+ "host_id": 2
+ }]
+ },
+ "set_hosts": {
+ "machines": [{
+ "machine_id": 3
+ },{
+ "machine_id": 4
+ }]
+ },
+ "remove_hosts": {
+ "hosts": [1]
+ },
+ "review": {
+ "hosts": [1,2,3]
+ },
+ "deploy": {
+ "hosts": [1,2,3]
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "hosts": [
+ {
+ "id" : 5,
+ "machine_id": 10
+ },
+ {
+ "id" : 6,
+ "machine_id": 11
+ },
+ {
+ "id" : 7,
+ "machine_id": 12
+ }
+ ]
+ }
+
+ OR
+
+ {
+ "hosts": [
+ {
+ "id" : 1,
+ "machine_id": 13
+ },
+ {
+ "id" : 2,
+ "machine_id": 14
+ },
+ {
+ "id" : 3,
+ "machine_id": 15
+ }
+ ]
+ }
+
+ OR
+
+ {
+ "hosts": [
+ {
+ "id" : 1,
+ "machine_id": 13
+ }
+ ]
+ }
+
+ OR
+ {
+ "hosts": [
+ {
+ "id" : 1,
+ "machine_id": 10
+ },
+ {
+ "id" : 2,
+ "machine_id": 11
+ },
+ {
+ "id" : 3,
+ "machine_id": 12
+ }
+ ]
+ }
+
+ OR
+
+ {
+ "cluster": {"id": 1},
+ "hosts": [{"id": 1}, {"id": 2}, {"id": 3}]
+ }
+
+ OR
+
+ {
+ "status": "deploy action sent",
+ "cluster": {
+ "id": 1,
+ },
+ "hosts": [
+ {
+ "id": 3
+ }
+ ]
+ }
+
+
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Takes an action for a specific cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
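+ # Illustrative request only (server name and ids hypothetical; the bundled
+ # apiclient sends the auth token in an X-Auth-Token header):
+ # curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
+ # -d '{"deploy": {"hosts": [1, 2, 3]}}' http://compass-server/api/clusters/1/action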
+ /metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config": {
+ },
+ "os_config": {
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Get metadata of a specific cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+
+/hosts:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "id” : 1,
+ "name": "host_01",
+ "machine_id": 1,
+ "mac": "---MAC-address--",
+ "ip": "192.168.1.2",
+ "os": "CentOS",
+ "os_installed": false,
+ "clusters": ["cluster_01"],
+ "created_by": "user1@email.com",
+ "created_at": "---timestamp---",
+ "updated_at": "---timestamp---",
+ "links”: [
+ {
+ "href" : "/hosts/1",
+ "rel": "self
+ }
+ ]
+ },
+ ...
+ ]
+ queryParameters:
+ name:
+ os_name:
+ owner:
+ mac:
+ description: Lists information for all hosts
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{host_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "host_01",
+ "machine_id": 1,
+ "mac": "---MAC-address--”,
+ "ip": "192.168.1.2"
+ "os": "CentOs",
+ "os_installed": false,
+ "domain": "xxx",
+ "dns": "xxx",
+ "created_by": "user1@email.com",
+ "created_at": "---timestamp---",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Lists information for a specified host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "name": "update_host_name"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "host1"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host properties.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "host_01_new",
+ "mac": "---MAC-address--",
+ "os_name": "CentOs",
+ "os_installed": false
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "itemNotFound",
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Deletes a host (admin only). The host must be not in any cluster.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "poweron": [1],
+ "poweroff": [1],
+ "reset": [1]
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "status": "host <host_id> power<on|off|reset> action sent",
+ "host": {...}
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The host witch ID '$host_id' cannot be found!"
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The host didnot set IPMI info!"
+ }
+ description: Poweron, poweroff, reset this host by IPMI
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
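+ # Illustrative request only (values hypothetical):
+ # curl -X POST -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
+ # -d '{"poweron": [1]}' http://compass-server/api/hosts/1/action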
+ /clusters:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster_new",
+ "reinstall_distributed_system": true,
+ "created_at": "2014-10-18 23:16:02",
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:16:39",
+ "owner": "admin@huawei.com",
+ "distributed_system_installed": false,
+ "id": 2
+ }
+ ]
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Lists clusters which the host belongs to
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /config:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+ "global": {
+ "language": "EN",
+ "timezone": "PDT",
+ }
+ "partition": {
+ "/var": {
+ "max_size": "20",
+ "size_percentage": "30"
+ },
+ "/home": {
+ "max_size": "20",
+ "size_percentage": "40"
+ }
+ }
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Lists config information for a specified host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config": {
+ "general": {
+ "language": "EN",
+ "timezone": "UTC",
+ "http_proxy": "http://127.0.0.1:3128",
+ "https_proxy": "http://127.0.0.1:3128",
+ "no_proxy": [
+ "127.0.0.1",
+ "compass"
+ ],
+ "ntp_server": "127.0.0.1",
+ "dns_servers": [
+ "127.0.0.1"
+ ],
+ "domain": "ods.com",
+ "search_path": [
+ "ods.com"
+ ],
+ "default_gateway": "127.0.0.1"
+ },
+ "server_credentials": {
+ "username": "root",
+ "password": "root"
+ },
+ "partition": {
+ "/var": {
+ "max_size": "100G",
+ "percentage": 10,
+ "size": "1G"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+ ...
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set config properties for a specified host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config": {
+ "general": {
+ "language": "EN",
+ "timezone": "UTC",
+ "http_proxy": "http://127.0.0.1:3128",
+ "https_proxy": "http://127.0.0.1:3128",
+ "no_proxy": [
+ "127.0.0.1",
+ "compass"
+ ],
+ "ntp_server": "127.0.0.1",
+ "dns_servers": [
+ "127.0.0.1"
+ ],
+ "domain": "ods.com",
+ "search_path": [
+ "ods.com"
+ ],
+ "default_gateway": "127.0.0.1"
+ },
+ "server_credentials": {
+ "username": "root",
+ "password": "root"
+ },
+ "partition": {
+ "/var": {
+ "max_size": "100G",
+ "percentage": 10,
+ "size": "1G"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ ...
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: update host config properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+ ...
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: delete host config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /state:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "state": "INSTALLING",
+ "percentage": 0.5,
+ "severity": "INFO",
+ "message": "-----some--message-----",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: get host state
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "state": "INSTALLING"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "cluster_id" : 1,
+ "host_id": 10
+ "state": "SUCCESSFUL",
+ "percentage": 1,
+ "severity": "INFO",
+ "message": "-----some--message-----",
+ "updated_at": "---timestamp---"
+ }
+
+ OR
+
+ {
+ "cluster_id" : 1,
+ "host_id": 10
+ "state": "ERROR",
+ "percentage": 0.7,
+ "severity": "ERROR",
+ "message": "---some-error-message---",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host state properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /network:
+ get:
+ body:
+ application/json:
+ schema: |
+ [
+ {
+ "interface": "eth0",
+ "ip": "10.172.20.91",
+ "subnet_id": 1,
+ "is_mgmt": "False",
+ "is_promiscuous": "False"
+ },
+ {
+ "interface": "eth1",
+ "ip": "10.172.20.110",
+ "subnet_id": 1,
+ "is_mgmt": "False",
+ "is_promiscuous": "False"
+ }
+ ]
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "eth0": {
+ "id": 1,
+ "interface": "eth0",
+ "ip": "192.168.10.1",
+ "is_mgmt": true,
+ "is_promiscuous": false,
+ "subnet_id": 1,
+ },
+ "eth1": {
+ "id": 2,
+ "interface": "eth1",
+ "ip": "10.12.123.1",
+ "is_promiscuous": true,
+ "subnet_id": 2,
+ },
+ …..
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Lists network info for a specified host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "interface": "eth0",
+ "ip": "10.145.89.152",
+ "subnet_id": 1,
+ "is_mgmt": "True",
+ "is_promiscuous": "False"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "interface": "eth3",
+ "ip": "12.140.10.1",
+ "is_promiscuous": true,
+ "is_mgmt": false,
+ "subnet_id": 3,
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id ‘some_id’ cannot be found!"
+ }
+ description: Creates an interface config entry
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{interface}:
+ get:
+ description: list host network information
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "interface": "eth1",
+ "ip": "10.145.89.155",
+ "subnet_id": 1,
+ "is_mgmt": "True",
+ "is_promiscuous": "False"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "interface": "eth3",
+ "ip": "12.140.10.2",
+ "is_promiscuous": true,
+ "is_mgmt": false,
+ "subnet_id": 4,
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host network properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "interface": "eth3",
+ "ip": "12.140.10.1",
+ "is_promiscuous”: true,
+ "is_mgmt": false,
+ "subnet_id": 3
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: delete a host network
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/proxy/{path}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "created_at": "2014-10-19 10:50:04",
+ "updated_at": "2014-10-19 10:50:04",
+ "email": "admin@huawei.com",
+ "is_admin": true,
+ "active": true,
+ "id": 1
+ }
+ ]
+ queryParameters:
+ URL:
+ example: http://10.145.88.211/api/proxy/users
+ description: proxy get request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "url": "http://10.145.88.211/api/proxy/subnets"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "subnet": "10.145.86.0/23",
+ "created_at": "2014-10-19 11:25:33",
+ "updated_at": "2014-10-19 11:25:33",
+ "name": "10.145.86.0/23",
+ "id": 3
+ }
+ description: proxy post request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "url": "http://10.145.88.211/api/proxy/subnets/3"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "subnet": "10.145.84.0/23",
+ "created_at": "2014-10-19 11:25:33",
+ "updated_at": "2014-10-19 11:29:08",
+ "name": "10.145.84.0/23",
+ "id": 3
+ }
+ description: proxy put request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "url": "http://10.145.88.211/api/proxy/subnets/3"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.42",
+ "created_at": "2014-10-19 11:31:40",
+ "updated_at": "2014-10-19 11:33:46",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "private"
+ },
+ "id": 3
+ }
+ description: proxy patch request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.42",
+ "created_at": "2014-10-19 11:31:40",
+ "updated_at": "2014-10-19 11:33:46",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "private"
+ },
+ "id": 3
+ }
+ queryParameters:
+ URL:
+ example: http://10.145.88.211/api/proxy/switches/3
+ description: proxy delete request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
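+ # Note: judging from the examples above, /proxy/{path} relays each request
+ # to the equivalent non-proxy API path (e.g. /api/proxy/users behaves like
+ # /api/users), with the same auth token header.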
+/host/networks:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "eth1": {
+ "ip": "192.168.100.155",
+ "created_at": "2015-04-17 14:55:55",
+ "is_promiscuous": true,
+ "updated_at": "2015-04-17 14:55:55",
+ "netmask": "255.255.254.0",
+ "is_mgmt": false,
+ "interface": "eth1",
+ "id": 1
+ },
+ "eth0": {
+ "ip": "10.145.89.155",
+ "created_at": "2015-04-17 14:55:55",
+ "is_promiscuous": false,
+ "updated_at": "2015-04-17 14:55:55",
+ "netmask": "255.255.254.0",
+ "is_mgmt": true,
+ "interface": "eth0",
+ "id": 2
+ }
+ }
+ description: List all host networks
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{host_network_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "192.168.100.155",
+ "created_at": "2015-04-17 14:55:55",
+ "is_promiscuous": true,
+ "updated_at: "2015-04-17 14:55:55",
+ "netmask": "255.255.254.0",
+ "is_mgmt": false,
+ "interface": "eth1",
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table HostNetwork: {'id': <host_network_id>}",
+ }
+ description: List specific host network info
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/host-networks/{host_network_id}:
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "interface": "eth0",
+ "ip": "10.145.88.10"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "192.168.100.159",
+ "created_at": "2015-04-17 14:55:55",
+ "is_promiscuous": true,
+ "updated_at: "2015-04-17 14:55:55",
+ "netmask": "255.255.254.0",
+ "is_mgmt": false,
+ "interface": "eth1",
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ message: "Cannot find the record in table HostNetwork: {'id': <host_network_id>}"
+ }
+ description: Update a specific host network info.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip: "10.145.89.155",
+ "created_at": "2015-04-17 15:44:54"
+ "is_promiscuous": false,
+ "updated_at": "2015-04-17 15:44:54",
+ "netmask": "255.255.254.0",
+ "is_mgmt": false
+ "interface": "eth0",
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ message: "Cannot find the record in table HostNetwork: {'id': <host_network_id>}"
+ }
+ description: Delete a host network.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+
+
+
diff --git a/compass-deck/api/auth_handler.py b/compass-deck/api/auth_handler.py
new file mode 100644
index 0000000..3c22ebb
--- /dev/null
+++ b/compass-deck/api/auth_handler.py
@@ -0,0 +1,49 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from itsdangerous import BadData
+import logging
+import sys
+
+from compass.api import app
+from compass.api import exception_handler
+from compass.api import login_manager
+
+from compass.db.api import user as user_api
+from compass.db.api.user import UserWrapper
+
+
+def authenticate_user(email, password, **kwargs):
+ """Authenticate a user by email and password."""
+ user = user_api.get_user_object(
+ email, **kwargs
+ )
+ user.authenticate(password)
+ return user
+
+
+@login_manager.token_loader
+def load_user_from_token(token):
+ """Return a user object from a session token."""
+ return user_api.get_user_object_from_token(token)
+
+
+@login_manager.header_loader
+def load_user_from_header(header):
+ """Return a user object from token."""
+ return user_api.get_user_object_from_token(header)
+
+
+@login_manager.user_loader
+def load_user(token):
+ """Return a user object from token."""
+ return user_api.get_user_object_from_token(token)
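+
+# Usage sketch (illustrative; the token helper on the user object is assumed
+# to come from flask.ext.login's mixin rather than shown in this module):
+#
+# user = authenticate_user('admin@huawei.com', 'admin')
+# token = user.get_auth_token()
+# assert load_user_from_token(token) is not None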
diff --git a/compass-deck/api/exception_handler.py b/compass-deck/api/exception_handler.py
new file mode 100644
index 0000000..67c780e
--- /dev/null
+++ b/compass-deck/api/exception_handler.py
@@ -0,0 +1,92 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Exceptions for RESTful API."""
+import logging
+import simplejson as json
+import traceback
+
+from compass.api import app
+from compass.api import utils
+
+
+class HTTPException(Exception):
+ def __init__(self, message, status_code):
+ super(HTTPException, self).__init__(message)
+ self.traceback = traceback.format_exc()
+ self.status_code = status_code
+
+ def to_dict(self):
+ return {'message': str(self)}
+
+
+class ItemNotFound(HTTPException):
+ """Define the exception for referring non-existing object."""
+ def __init__(self, message):
+ super(ItemNotFound, self).__init__(message, 410)
+
+
+class BadRequest(HTTPException):
+ """Define the exception for invalid/missing parameters.
+
+ User making a request in invalid state cannot be processed.
+ """
+ def __init__(self, message):
+ super(BadRequest, self).__init__(message, 400)
+
+
+class Unauthorized(HTTPException):
+ """Define the exception for invalid user login."""
+ def __init__(self, message):
+ super(Unauthorized, self).__init__(message, 401)
+
+
+class UserDisabled(HTTPException):
+ """Define the exception for disabled users."""
+ def __init__(self, message):
+ super(UserDisabled, self).__init__(message, 403)
+
+
+class Forbidden(HTTPException):
+ """Define the exception for invalid permissions."""
+ def __init__(self, message):
+ super(Forbidden, self).__init__(message, 403)
+
+
+class BadMethod(HTTPException):
+ """Define the exception for invoking unsupported methods."""
+ def __init__(self, message):
+ super(BadMethod, self).__init__(message, 405)
+
+
+class ConflictObject(HTTPException):
+ """Define the exception for creating an existing object."""
+ def __init__(self, message):
+ super(ConflictObject, self).__init__(message, 409)
+
+
+@app.errorhandler(Exception)
+def handle_exception(error):
+ if hasattr(error, 'to_dict'):
+ response = error.to_dict()
+ else:
+ response = {'message': str(error)}
+ if app.debug and hasattr(error, 'traceback'):
+ response['traceback'] = error.traceback
+
+ status_code = 400
+ if hasattr(error, 'status_code'):
+ status_code = error.status_code
+
+ return utils.make_json_response(status_code, response)
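+
+# Usage sketch: view code raises these exceptions under a request context and
+# handle_exception() converts them to JSON, e.g.
+# raise ItemNotFound("Cluster with id '1' cannot be found!")
+# produces a 410 response whose body is {"message": "..."}, plus a
+# 'traceback' field when app.debug is enabled.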
diff --git a/compass-deck/api/utils.py b/compass-deck/api/utils.py
new file mode 100644
index 0000000..87977cd
--- /dev/null
+++ b/compass-deck/api/utils.py
@@ -0,0 +1,35 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utils for API usage."""
+from flask import make_response
+import simplejson as json
+
+
+def make_json_response(status_code, data):
+ """Wrap json format to the reponse object."""
+
+ result = json.dumps(data, indent=4) + '\r\n'
+ resp = make_response(result, status_code)
+ resp.headers['Content-type'] = 'application/json'
+ return resp
+
+
+def make_csv_response(status_code, csv_data, fname):
+ """Wrap CSV format to the reponse object."""
+ fname = '.'.join((fname, 'csv'))
+ resp = make_response(csv_data, status_code)
+ resp.mimetype = 'text/csv'
+ resp.headers['Content-Disposition'] = 'attachment; filename="%s"' % fname
+ return resp
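+
+# Usage sketch (requires a Flask application context):
+# resp = make_json_response(200, {'status': 'OK'})
+# resp = make_csv_response(200, 'id,name\r\n1,host_01', 'hosts')
+# make_csv_response appends the '.csv' suffix itself, so callers pass a
+# bare file name.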
diff --git a/compass-deck/api/v1/__init__.py b/compass-deck/api/v1/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/api/v1/__init__.py
diff --git a/compass-deck/api/v1/api.py b/compass-deck/api/v1/api.py
new file mode 100644
index 0000000..9dbc548
--- /dev/null
+++ b/compass-deck/api/v1/api.py
@@ -0,0 +1,248 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define all the RestfulAPI entry points."""
+import logging
+import simplejson as json
+
+from flask import Blueprint
+from flask import request
+
+from flask.ext.restful import Resource
+
+from compass.api.exception import BadRequest
+from compass.api.exception import Forbidden
+from compass.api.exception import ItemNotFound
+from compass.api.exception import Unauthorized
+from compass.api.restfulAPI import CompassApi
+from compass.api import utils
+
+from compass.db import db_api
+from compass.db.exception import InvalidParameter
+from compass.db.exception import RecordNotExists
+
+
+v1_app = Blueprint('v1_app', __name__)
+api = CompassApi(v1_app)
+PREFIX = '/v1.0'
+
+
+@v1_app.route('/users', methods=['GET'])
+def list_users():
+ """List details of all users filtered by user email and admin role."""
+
+ emails = request.args.getlist('email')
+ is_admin = request.args.get('admin')
+ filters = {}
+
+ if emails:
+ filters['email'] = emails
+
+ if is_admin is not None:
+ if is_admin == 'true':
+ filters['is_admin'] = True
+ elif is_admin == 'false':
+ filters['is_admin'] = False
+
+ users_list = db_api.user.list_users(filters)
+
+ return utils.make_json_response(200, users_list)
+
+
+class User(Resource):
+ ENDPOINT = PREFIX + '/users'
+
+ def get(self, user_id):
+ """Get user's information for the specified ID."""
+ try:
+ user_data = db_api.user.get_user(user_id)
+ logging.debug("user_data is===>%s", user_data)
+
+ except RecordNotExists as ex:
+ error_msg = ex.message
+ raise ItemNotFound(error_msg)
+
+ return utils.make_json_response(200, user_data)
+
+
+class Adapter(Resource):
+ ENDPOINT = PREFIX + "/adapters"
+
+ def get(self, adapter_id):
+ """Get information for a specified adapter."""
+
+ try:
+ adapter_info = db_api.adapter.get_adapter(adapter_id)
+ except RecordNotExists as ex:
+ error_msg = ex.message
+ raise ItemNotFound(error_msg)
+
+ return utils.make_json_response(200, adapter_info)
+
+
+@v1_app.route('/adapters', methods=['GET'])
+def list_adapters():
+ """List details of all adapters filtered by the adapter name(s)."""
+
+ names = request.args.getlist('name')
+ filters = {}
+ if names:
+ filters['name'] = names
+
+ adapters_list = db_api.adapter.list_adapters(filters)
+ return utils.make_json_response(200, adapters_list)
+
+
+@v1_app.route('/adapters/<int:adapter_id>/config-schema', methods=['GET'])
+def get_adapter_config_schema(adapter_id):
+ """Get the config schema for a specified adapter."""
+
+ os_id = request.args.get("os-id", type=int)
+
+ try:
+ schema = db_api.adapter.get_adapter_config_schema(adapter_id, os_id)
+ except RecordNotExists as ex:
+ raise ItemNotFound(ex.message)
+
+ return utils.make_json_response(200, schema)
+
+
+@v1_app.route('/adapters/<int:adapter_id>/roles', methods=['GET'])
+def get_adapter_roles(adapter_id):
+ """Get roles for a specified adapter."""
+
+ try:
+ roles = db_api.adapter.get_adapter(adapter_id, True)
+ except RecordNotExists as ex:
+ raise ItemNotFound(ex.message)
+
+ return utils.make_json_response(200, roles)
+
+
+class Cluster(Resource):
+ def get(self, cluster_id):
+ """Get information for a specified cluster."""
+
+ try:
+ cluster_info = db_api.cluster.get_cluster(cluster_id)
+
+ except RecordNotExists as ex:
+ error_msg = ex.message
+ raise ItemNotFound(error_msg)
+
+ return utils.make_json_response(200, cluster_info)
+
+
+@v1_app.route('/clusters/<int:cluster_id>/config', methods=['PUT', 'PATCH'])
+def add_cluster_config(cluster_id):
+ """Update the config information for a specified cluster."""
+ config = json.loads(request.data)
+ if not config:
+ raise BadRequest("Config cannot be None!")
+
+ root_elems = ['os_config', 'package_config']
+ if len(config.keys()) != 1 or config.keys()[0] not in root_elems:
+ error_msg = ("Config root elements must be either"
+ "'os_config' or 'package_config'")
+ raise BadRequest(error_msg)
+
+ result = None
+ is_patch_method = request.method == 'PATCH'
+ try:
+ if "os_config" in config:
+ result = db_api.cluster\
+ .update_cluster_config(cluster_id,
+ 'os_config',
+ config,
+ patch=is_patch_method)
+ elif "package_config" in config:
+ result = db_api.cluster\
+ .update_cluster_config(cluster_id,
+ 'package_config', config,
+ patch=is_patch_method)
+
+ except InvalidParameter as ex:
+ raise BadRequest(ex.message)
+
+ except RecordNotExists as ex:
+ raise ItemNotFound(ex.message)
+
+ return utils.make_json_response(200, result)
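+
+# A valid request body carries exactly one root element, for example
+# (illustrative): PUT /clusters/1/config with {"os_config": {...}} or
+# PATCH /clusters/1/config with {"package_config": {...}}.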
+
+
+api.add_resource(User,
+ '/users',
+ '/users/<int:user_id>')
+api.add_resource(Adapter,
+ '/adapters',
+ '/adapters/<int:adapter_id>')
+api.add_resource(Cluster,
+ '/clusters',
+ '/clusters/<int:cluster_id>')
+
+
+@v1_app.errorhandler(ItemNotFound)
+def handle_not_exist(error, failed_objs=None):
+ """Handler of ItemNotFound Exception."""
+
+ message = {'type': 'itemNotFound',
+ 'message': error.message}
+
+ if failed_objs and isinstance(failed_objs, dict):
+ message.update(failed_objs)
+
+ return utils.make_json_response(404, message)
+
+
+@v1_app.errorhandler(Unauthorized)
+def handle_invalid_user(error, failed_objs=None):
+ """Handler of Unauthorized Exception."""
+
+ message = {'type': 'unauthorized',
+ 'message': error.message}
+
+ if failed_objs and isinstance(failed_objs, dict):
+ message.update(failed_objs)
+
+ return utils.make_json_response(401, message)
+
+
+@v1_app.errorhandler(Forbidden)
+def handle_no_permission(error, failed_objs=None):
+ """Handler of Forbidden Exception."""
+
+ message = {'type': 'Forbidden',
+ 'message': error.message}
+
+ if failed_objs and isinstance(failed_objs, dict):
+ message.update(failed_objs)
+
+ return utils.make_json_response(403, message)
+
+
+@v1_app.errorhandler(BadRequest)
+def handle_bad_request(error, failed_objs=None):
+ """Handler of badRequest Exception."""
+
+ message = {'type': 'badRequest',
+ 'message': error.message}
+
+ if failed_objs and isinstance(failed_objs, dict):
+ message.update(failed_objs)
+
+ return utils.make_json_response(400, message)
+
+
+if __name__ == '__main__':
+ v1_app.run(debug=True)
diff --git a/compass-deck/apiclient/__init__.py b/compass-deck/apiclient/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/apiclient/__init__.py
diff --git a/compass-deck/apiclient/example.py b/compass-deck/apiclient/example.py
new file mode 100755
index 0000000..4c01b98
--- /dev/null
+++ b/compass-deck/apiclient/example.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+# copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Example code to deploy a cluster by compass client api."""
+import os
+import re
+import sys
+import time
+
+# from compass.apiclient.restful import Client
+from restful import Client
+
+COMPASS_SERVER_URL = 'http://localhost/api'
+COMPASS_LOGIN_EMAIL = 'admin@huawei.com'
+COMPASS_LOGIN_PASSWORD = 'admin'
+SWITCH_IP = '172.29.8.40'
+SWITCH_SNMP_VERSION = '2c'
+SWITCH_SNMP_COMMUNITY = 'public'
+CLUSTER_NAME = 'test_cluster'
+HOST_NAME_PREFIX = 'host'
+SERVICE_USERNAME = 'service'
+SERVICE_PASSWORD = 'service'
+CONSOLE_USERNAME = 'console'
+CONSOLE_PASSWORD = 'console'
+HA_VIP = ''
+
+MANAGEMENT_IP_START = '10.145.88.130'
+MANAGEMENT_IP_END = '10.145.88.254'
+MANAGEMENT_IP_GATEWAY = '10.145.88.1'
+MANAGEMENT_NETMASK = '255.255.255.0'
+MANAGEMENT_NIC = 'eth0'
+MANAGEMENT_PROMISC = 0
+TENANT_IP_START = '192.168.10.130'
+TENANT_IP_END = '192.168.10.255'
+TENANT_IP_GATEWAY = '192.168.10.1'
+TENANT_NETMASK = '255.255.255.0'
+TENANT_NIC = 'eth0'
+TENANT_PROMISC = 0
+PUBLIC_IP_START = '12.234.32.130'
+PUBLIC_IP_END = '12.234.32.255'
+PUBLIC_IP_GATEWAY = '12.234.32.1'
+PUBLIC_NETMASK = '255.255.255.0'
+PUBLIC_NIC = 'eth1'
+PUBLIC_PROMISC = 1
+STORAGE_IP_START = '172.16.100.130'
+STORAGE_IP_END = '172.16.100.255'
+STORAGE_NETMASK = '255.255.255.0'
+STORAGE_IP_GATEWAY = '172.16.100.1'
+STORAGE_NIC = 'eth0'
+STORAGE_PROMISC = 0
+HOME_PERCENTAGE = 5
+TMP_PERCENTAGE = 5
+VAR_PERCENTAGE = 10
+HOST_OS = 'CentOS-6.5-x86_64'
+
+
+PRESET_VALUES = {
+ 'LANGUAGE': 'EN',
+ 'TIMEZONE': 'GMT',
+ 'HTTPS_PROXY': 'http://10.145.89.100:3128',
+ 'NO_PROXY': ['127.0.0.1'],
+ 'DOMAIN': 'ods.com',
+ 'NAMESERVERS': ['10.145.89.100'],
+ 'NTP_SERVER': '10.145.89.100',
+ 'GATEWAY': '10.145.88.1',
+ 'PROXY': 'http://10.145.89.100:3128',
+ 'OS_NAME_PATTERN': 'CentOS.*',
+ 'ADAPTER_NAME': 'openstack_icehouse',
+ 'FLAVOR_PATTERN': 'allinone.*',
+ 'ROLES_LIST': ['allinone-compute'],
+ 'MACHINES_TO_ADD': ['00:0c:29:a7:ea:4b'],
+ 'BUILD_TIMEOUT': 60,
+ 'SEARCH_PATH': ['ods.com'],
+ 'SERVER_USERNAME': 'root',
+ 'SERVER_PASSWORD': 'root'
+}
+for v in PRESET_VALUES:
+ if v in os.environ.keys():
+ PRESET_VALUES[v] = os.environ.get(v)
+ print (v + '=' + PRESET_VALUES[v] + ' is set by env variables')
+ else:
+ print (v + '=' + str(PRESET_VALUES[v]) + ' uses the preset default')
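+
+# Any preset can be overridden from the environment, e.g.
+# ADAPTER_NAME=openstack_juno python example.py
+# Note: os.environ.get() returns a plain string, so overriding a list-valued
+# preset such as MACHINES_TO_ADD or NO_PROXY would need extra parsing
+# (e.g. value.split(',')) before use.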
+
+# instantiate a client
+client = Client(COMPASS_SERVER_URL)
+
+# login
+status, response = client.login(COMPASS_LOGIN_EMAIL, COMPASS_LOGIN_PASSWORD)
+print '============================================================'
+print 'login status: %s response: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+# list all switches
+status, response = client.list_switches()
+print '============================================================='
+print 'get all switches status: %s response: %s' % (status, response)
+
+# add a switch
+status, response = client.add_switch(
+ SWITCH_IP,
+ SWITCH_SNMP_VERSION,
+ SWITCH_SNMP_COMMUNITY
+)
+print '============================================'
+print 'adding a switch..status: %s, response: %s' % (status, response)
+
+# if switch already exists, get one from all switches
+switch = None
+if status < 400:
+ switch = response
+else:
+ status, response = client.list_switches()
+ print '========================================='
+ print 'list switches status %s response %s' % (status, response)
+ if status >= 400:
+ sys.exit(1)
+ for switch_ in response:
+ if switch_['ip'] == SWITCH_IP:
+ switch = switch_
+ break
+
+switch_id = switch['id']
+switch_ip = switch['ip']
+print '======================'
+print 'switch has been set as %s' % switch_ip
+
+# wait till switch state becomes under_monitoring
+while switch['state'] != 'under_monitoring':
+ print 'waiting for state to become under_monitoring'
+ client.poll_switch(switch_id)
+ status, resp = client.get_switch(switch_id)
+ print '====================================='
+ print 'poll switch status %s response %s' % (status, resp)
+ switch = resp
+ print 'switch is in state: %s' % switch['state']
+ time.sleep(5)
+
+print '========================================='
+print 'switch state now is %s' % (switch['state'])
+
+# create a machine list
+machine_macs = {}
+machines = {}
+for machine in PRESET_VALUES['MACHINES_TO_ADD']:
+ status, response = client.list_machines(mac=machine)
+ print '============================================'
+ print 'list machines status %s response %s' % (status, response)
+ if status >= 400:
+ sys.exit(1)
+ if status == 200 and response != []:
+ machine_id = response[0]['id']
+ machine_macs[machine_id] = response[0]['mac']
+ machines = response
+
+print '================================='
+print 'found machines are : %s' % machines
+
+machines_to_add = PRESET_VALUES['MACHINES_TO_ADD']
+if set(machine_macs.values()) != set(machines_to_add):
+ print 'only found macs %s while expected are %s' % (
+ machine_macs.values(), machines_to_add)
+ sys.exit(1)
+
+# list all adapters
+status, response = client.list_adapters()
+print '==============================='
+print 'all adapters are: %s' % response
+if status >= 400:
+ sys.exit(1)
+
+adapters = response
+adapter_id = None
+os_id = None
+flavor_id = None
+adapter_name = PRESET_VALUES['ADAPTER_NAME']
+os_pattern = re.compile(PRESET_VALUES['OS_NAME_PATTERN'])
+flavor_pattern = re.compile(PRESET_VALUES['FLAVOR_PATTERN'])
+for adapter in adapters:
+ if adapter_name == adapter['name']:
+ adapter_id = adapter['id']
+ for supported_os in adapter['supported_oses']:
+ if os_pattern.match(supported_os['name']):
+ os_id = supported_os['id']
+ break
+ for flavor in adapter['flavors']:
+ if flavor_pattern.match(flavor['name']):
+ flavor_id = flavor['id']
+ if adapter_id and os_id and flavor_id:
+ break
+
+print '======================================================='
+print 'using adapter %s os %s flavor %s to deploy cluster' % (
+ adapter_id, os_id, flavor_id
+)
+
+# add a cluster
+status, response = client.add_cluster(
+ CLUSTER_NAME,
+ adapter_id,
+ os_id,
+ flavor_id
+)
+print '==============================================================='
+print 'add cluster %s status %s: %s' % (CLUSTER_NAME, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.list_clusters(name=CLUSTER_NAME)
+print '================================================================'
+print 'list clusters %s status %s: %s' % (CLUSTER_NAME, status, response)
+if status >= 400:
+ sys.exit(1)
+
+cluster = response[0]
+cluster_id = cluster['id']
+
+print '=================='
+print 'cluster is %s' % cluster
+
+# Add hosts to the cluster
+machines_dict = {}
+machine_id_list = []
+for machine in machines:
+ id_mapping = {}
+ id_mapping['machine_id'] = machine['id']
+ machine_id_list.append(id_mapping)
+
+machines_dict['machines'] = machine_id_list
+
+status, response = client.add_hosts_to_cluster(
+ cluster_id, machines_dict
+)
+print '==================================='
+print 'add hosts %s to cluster status %s response %s' % (
+ machines_dict, status, response)
+if status >= 400:
+ sys.exit(1)
+
+# Add two subnets
+subnet_1 = '10.145.89.0/24'
+subnet_2 = '192.168.100.0/24'
+
+status, response = client.add_subnet(subnet_1)
+print '=================='
+print 'add subnet %s status %s: %s' % (subnet_1, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.add_subnet(subnet_2)
+print '=================='
+print 'add subnet %s status %s: %s' % (subnet_2, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, subnet1 = client.list_subnets(subnet=subnet_1)
+print '==========================================================='
+print 'list subnet %s status %s: %s' % (subnet_1, status, subnet1)
+if status >= 400:
+ sys.exit(1)
+
+status, subnet2 = client.list_subnets(subnet=subnet_2)
+print '==========================================================='
+print 'list subnet %s status %s: %s' % (subnet_2, status, subnet2)
+if status >= 400:
+ sys.exit(1)
+
+subnet1_id = subnet1[0]['id']
+subnet2_id = subnet2[0]['id']
+print '========================'
+print 'subnet1 has id: %s, subnet is %s' % (subnet1_id, subnet1)
+print 'subnet2 has id: %s, subnet is %s' % (subnet2_id, subnet2)
+
+# Add host network
+status, response = client.list_cluster_hosts(cluster_id)
+print '================================================'
+print 'list cluster hosts status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+host = response[0]
+host_id = host['id']
+print '=================='
+print 'host is: %s' % host
+
+status, response = client.add_host_network(
+ host_id,
+ 'eth0',
+ '10.145.89.200',
+ subnet1_id,
+ is_mgmt=True
+)
+print '======================='
+print 'add eth0 network status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.add_host_network(
+ host_id,
+ 'eth1',
+ '192.168.100.200',
+ subnet2_id,
+ is_promiscuous=True
+)
+print '======================='
+print 'add eth1 network status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+# Update os config to cluster
+cluster_os_config = {
+ 'general': {
+ 'language': PRESET_VALUES['LANGUAGE'],
+ 'timezone': PRESET_VALUES['TIMEZONE'],
+ 'http_proxy': PRESET_VALUES['PROXY'],
+ 'https_proxy': PRESET_VALUES['HTTPS_PROXY'],
+ 'no_proxy': PRESET_VALUES['NO_PROXY'],
+ 'ntp_server': PRESET_VALUES['NTP_SERVER'],
+ 'dns_servers': PRESET_VALUES['NAMESERVERS'],
+ 'domain': PRESET_VALUES['DOMAIN'],
+ 'search_path': PRESET_VALUES['SEARCH_PATH'],
+ 'default_gateway': PRESET_VALUES['GATEWAY']
+ },
+ 'server_credentials': {
+ 'username': PRESET_VALUES['SERVER_USERNAME'],
+ 'password': PRESET_VALUES['SERVER_PASSWORD']
+ },
+ 'partition': {
+ '/var': {
+ 'percentage': VAR_PERCENTAGE,
+ },
+ '/home': {
+ 'percentage': HOME_PERCENTAGE,
+ }
+ }
+}
+
+
+cluster_package_config = {
+ 'security': {
+ 'service_credentials': {
+ 'image': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'compute': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'dashboard': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'identity': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'metering': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'rabbitmq': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'volume': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'mysql': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ }
+ },
+ 'console_credentials': {
+ 'admin': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'compute': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'dashboard': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'image': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'metering': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'network': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'object-store': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'volume': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ }
+ }
+ },
+ 'network_mapping': {
+ 'management': MANAGEMENT_NIC,
+ 'tenant': TENANT_NIC,
+ 'storage': STORAGE_NIC,
+ 'public': PUBLIC_NIC
+ }
+}
+
+status, response = client.update_cluster_config(
+ cluster_id,
+ cluster_os_config,
+ cluster_package_config
+)
+
+print '======================================='
+print 'cluster %s update status %s: %s' % (
+ cluster_id, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.update_cluster_host(
+ cluster_id, host_id, roles=PRESET_VALUES['ROLES_LIST'])
+print '================================================='
+print 'update cluster host %s/%s status %s: %s' % (
+ cluster_id, host_id, status, response)
+if status >= 400:
+ sys.exit(1)
+
+# Review and deploy
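+# (review is expected to validate the rendered config for the listed hosts;
+# deploy then triggers installation; both take the same {'hosts': [...]} body)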
+status, response = client.review_cluster(
+ cluster_id, review={'hosts': [host_id]})
+print '======================================='
+print 'reviewing cluster status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.deploy_cluster(
+ cluster_id, deploy={'hosts': [host_id]})
+print '======================================='
+print 'deploy cluster status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
diff --git a/compass-deck/apiclient/restful.py b/compass-deck/apiclient/restful.py
new file mode 100644
index 0000000..bb82922
--- /dev/null
+++ b/compass-deck/apiclient/restful.py
@@ -0,0 +1,1102 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass api client library.
+"""
+
+import json
+import logging
+import requests
+
+
+class Client(object):
+ """compass restful api wrapper"""
+
+ def __init__(self, url, headers=None, proxies=None, stream=None):
+ logging.info('create api client %s', url)
+ self.url_ = url
+ self.session_ = requests.Session()
+
+ if headers:
+ self.session_.headers.update(headers)
+ self.session_.headers.update({
+ 'Accept': 'application/json'
+ })
+
+ if proxies is not None:
+ self.session_.proxies = proxies
+
+ if stream is not None:
+ self.session_.stream = stream
+
+ def __del__(self):
+ self.session_.close()
+
+ @classmethod
+ def _get_response(cls, resp):
+ response_object = {}
+ try:
+ response_object = resp.json()
+ except Exception as error:
+ logging.error('failed to load object from %s: %s',
+ resp.url, resp.content)
+ logging.exception(error)
+ response_object['status'] = 'Json Parsing Failed'
+ response_object['message'] = resp.content
+
+ return resp.status_code, response_object
+
+ def _get(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('get %s with data %s', url, data)
+ if data:
+ resp = self.session_.get(url, params=data)
+ else:
+ resp = self.session_.get(url)
+
+ return self._get_response(resp)
+
+ def _post(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('post %s with data %s', url, data)
+ if data:
+ resp = self.session_.post(url, json.dumps(data))
+ else:
+ resp = self.session_.post(url)
+
+ return self._get_response(resp)
+
+ def _put(self, req_url, data=None):
+ """encapsulate put method."""
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('put %s with data %s', url, data)
+ if data:
+ resp = self.session_.put(url, json.dumps(data))
+ else:
+ resp = self.session_.put(url)
+
+ return self._get_response(resp)
+
+ def _patch(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('patch %s with data %s', url, data)
+ if data:
+ resp = self.session_.patch(url, json.dumps(data))
+ else:
+ resp = self.session_.patch(url)
+
+ return self._get_response(resp)
+
+ def _delete(self, req_url):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('delete %s', url)
+ return self._get_response(self.session_.delete(url))
+
+ def login(self, email, password):
+ credential = {}
+ credential['email'] = email
+ credential['password'] = password
+ return self._post('/users/login', data=credential)
+
+ def get_token(self, email, password):
+ credential = {}
+ credential['email'] = email
+ credential['password'] = password
+ status, resp = self._post('/users/token', data=credential)
+ if status < 400:
+ self.session_.headers.update({'X-Auth-Token': resp['token']})
+ return status, resp
+
+ def get_users(self):
+ users = self._get('/users')
+ return users
+
+ def list_switches(
+ self,
+ switch_ips=None,
+ switch_ip_networks=None):
+ """list switches."""
+ params = {}
+ if switch_ips:
+ params['switchIp'] = switch_ips
+
+ if switch_ip_networks:
+ params['switchIpNetwork'] = switch_ip_networks
+
+ switchlist = self._get('/switches', data=params)
+ return switchlist
+
+ def get_switch(self, switch_id):
+ return self._get('/switches/%s' % switch_id)
+
+ def add_switch(
+ self,
+ switch_ip,
+ version=None,
+ community=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['ip'] = switch_ip
+ data['credentials'] = {}
+ if version:
+ data['credentials']['version'] = version
+
+ if community:
+ data['credentials']['community'] = community
+
+ return self._post('/switches', data=data)
+
+ def update_switch(self, switch_id, state='initialized',
+                      version='2c', community='public', raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ data['credentials'] = {}
+ if version:
+ data['credentials']['version'] = version
+
+ if community:
+ data['credentials']['community'] = community
+
+ if state:
+ data['state'] = state
+
+ return self._put('/switches/%s' % switch_id, data=data)
+
+ def delete_switch(self, switch_id):
+ return self._delete('/switches/%s' % switch_id)
+
+ def list_switch_machines(self, switch_id, port=None, vlans=None,
+ tag=None, location=None):
+ data = {}
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/switches/%s/machines' % switch_id, data=data)
+
+ def get_switch_machine(self, switch_id, machine_id):
+ return self._get('/switches/%s/machines/%s' % (switch_id, machine_id))
+
+ def list_switch_machines_hosts(self, switch_id, port=None, vlans=None,
+ mac=None, tag=None, location=None,
+ os_name=None, os_id=None):
+
+ data = {}
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/switches/%s/machines-hosts' % switch_id, data=data)
+
+ def add_switch_machine(self, switch_id, mac=None, port=None,
+ vlans=None, ipmi_credentials=None,
+ tag=None, location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if mac:
+ data['mac'] = mac
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._post('/switches/%s/machines' % switch_id, data=data)
+
+ def update_switch_machine(self, switch_id, machine_id, port=None,
+ vlans=None, ipmi_credentials=None, tag=None,
+ location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._put('/switches/%s/machines/%s' %
+ (switch_id, machine_id), data=data)
+
+ def delete_switch_machine(self, switch_id, machine_id):
+ return self._delete('/switches/%s/machines/%s' %
+ (switch_id, machine_id))
+
+ # test these
+ def poll_switch(self, switch_id):
+ data = {}
+ data['find_machines'] = None
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def add_group_switch_machines(self, switch_id, group_machine_ids):
+ data = {}
+ data['add_machines'] = group_machine_ids
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def remove_group_switch_machines(self, switch_id, group_machine_ids):
+ data = {}
+ data['remove_machines'] = group_machine_ids
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def update_group_switch_machines(self, switch_id, group_machines):
+ data = {}
+ data['set_machines'] = group_machines
+ return self._post('/switches/%s/action' % switch_id, data=data)
+ # end
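+    # Each action helper above posts a single-key payload to
+    # /switches/<switch_id>/action, e.g. {'find_machines': None}
+    # or {'add_machines': [...]}.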
+
+ def list_switchmachines(self, switch_ip_int=None, port=None, vlans=None,
+ mac=None, tag=None, location=None):
+ data = {}
+ if switch_ip_int:
+ data['switch_ip_int'] = switch_ip_int
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/switch-machines', data=data)
+
+ def list_switchmachines_hosts(self, switch_ip_int=None, port=None,
+ vlans=None, mac=None, tag=None,
+ location=None, os_name=None, os_id=None):
+
+ data = {}
+ if switch_ip_int:
+ data['switch_ip_int'] = switch_ip_int
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/switches-machines-hosts', data=data)
+
+ def show_switchmachine(self, switchmachine_id):
+ return self._get('/switch-machines/%s' % switchmachine_id)
+
+ def update_switchmachine(self, switchmachine_id,
+ port=None, vlans=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ return self._put('/switch-machines/%s' % switchmachine_id, data=data)
+
+ def patch_switchmachine(self, switchmachine_id,
+ vlans=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ elif vlans:
+ data['vlans'] = vlans
+
+ return self._patch('/switch-machines/%s' % switchmachine_id, data=data)
+
+ def delete_switchmachine(self, switchmachine_id):
+ return self._delete('/switch-machines/%s' % switchmachine_id)
+
+ def list_machines(self, mac=None, tag=None, location=None):
+ data = {}
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/machines', data=data)
+
+ def get_machine(self, machine_id):
+        return self._get('/machines/%s' % machine_id)
+
+ def update_machine(self, machine_id, ipmi_credentials=None, tag=None,
+ location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._put('/machines/%s' % machine_id, data=data)
+
+ def patch_machine(self, machine_id, ipmi_credentials=None,
+ tag=None, location=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._patch('/machines/%s' % machine_id, data=data)
+
+ def delete_machine(self, machine_id):
+        return self._delete('/machines/%s' % machine_id)
+
+ def list_subnets(self, subnet=None, name=None):
+ data = {}
+ if subnet:
+ data['subnet'] = subnet
+
+ if name:
+ data['name'] = name
+
+ return self._get('/subnets', data=data)
+
+ def get_subnet(self, subnet_id):
+ return self._get('/subnets/%s' % subnet_id)
+
+ def add_subnet(self, subnet, name=None, raw_data=None):
+ data = {}
+ data['subnet'] = subnet
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if name:
+ data['name'] = name
+
+ return self._post('/subnets', data=data)
+
+ def update_subnet(self, subnet_id, subnet=None,
+ name=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if subnet:
+ data['subnet'] = subnet
+
+ if name:
+ data['name'] = name
+ return self._put('/subnets/%s' % subnet_id, data=data)
+
+ def delete_subnet(self, subnet_id):
+ return self._delete('/subnets/%s' % subnet_id)
+
+ def list_adapters(self, name=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ return self._get('/adapters', data=data)
+
+ def get_adapter(self, adapter_id):
+ return self._get('/adapters/%s' % adapter_id)
+
+ def get_adapter_roles(self, adapter_id):
+ return self._get('/adapters/%s/roles' % adapter_id)
+
+ def get_adapter_metadata(self, adapter_id):
+ return self._get('/adapters/%s/metadata' % adapter_id)
+
+ def get_os_metadata(self, os_id):
+ return self._get('/oses/%s/metadata' % os_id)
+
+ def list_clusters(self, name=None, os_name=None,
+ owner=None,
+ adapter_id=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if owner:
+ data['owner'] = owner
+
+ if adapter_id:
+ data['adapter_id'] = adapter_id
+
+ return self._get('/clusters', data=data)
+
+ def get_cluster(self, cluster_id):
+ return self._get('/clusters/%s' % cluster_id)
+
+ def add_cluster(self, name, adapter_id, os_id,
+ flavor_id=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if flavor_id:
+ data['flavor_id'] = flavor_id
+ data['name'] = name
+ data['adapter_id'] = adapter_id
+ data['os_id'] = os_id
+
+ return self._post('/clusters', data=data)
+
+ def update_cluster(self, cluster_id, name=None,
+ reinstall_distributed_system=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_distributed_system:
+ data['reinstall_distributed_system'] = (
+ reinstall_distributed_system
+ )
+ return self._put('/clusters/%s' % cluster_id, data=data)
+
+ def delete_cluster(self, cluster_id):
+ return self._delete('/clusters/%s' % cluster_id)
+
+ def get_cluster_config(self, cluster_id):
+ return self._get('/clusters/%s/config' % cluster_id)
+
+ def get_cluster_metadata(self, cluster_id):
+ return self._get('/clusters/%s/metadata' % cluster_id)
+
+ def update_cluster_config(self, cluster_id, os_config=None,
+ package_config=None, config_step=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ if config_step:
+ data['config_step'] = config_step
+
+ return self._put('/clusters/%s/config' % cluster_id, data=data)
+
+ def patch_cluster_config(self, cluster_id, os_config=None,
+ package_config=None, config_step=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ if config_step:
+ data['config_step'] = config_step
+
+ return self._patch('/clusters/%s/config' % cluster_id, data=data)
+
+ def delete_cluster_config(self, cluster_id):
+ return self._delete('/clusters/%s/config' % cluster_id)
+
+ # test these
+ def add_hosts_to_cluster(self, cluster_id, hosts):
+ data = {}
+ data['add_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def set_hosts_in_cluster(self, cluster_id, hosts):
+ data = {}
+ data['set_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def remove_hosts_from_cluster(self, cluster_id, hosts):
+ data = {}
+ data['remove_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+    def review_cluster(self, cluster_id, review=None):
+        data = {}
+        data['review'] = review or {}
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+    def deploy_cluster(self, cluster_id, deploy=None):
+        data = {}
+        data['deploy'] = deploy or {}
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+    def redeploy_cluster(self, cluster_id, deploy=None):
+        data = {}
+        data['redeploy'] = deploy or {}
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
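+
+    # Like the switch actions, the cluster action helpers above post a
+    # single-key payload to /clusters/<cluster_id>/action, e.g.
+    # {'add_hosts': [...]} or {'deploy': {...}}.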
+
+ def get_cluster_state(self, cluster_id):
+ return self._get('/clusters/%s/state' % cluster_id)
+
+ def list_cluster_hosts(self, cluster_id):
+ return self._get('/clusters/%s/hosts' % cluster_id)
+
+ def list_clusterhosts(self):
+ return self._get('/clusterhosts')
+
+ def get_cluster_host(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s' % (cluster_id, host_id))
+
+ def get_clusterhost(self, clusterhost_id):
+ return self._get('/clusterhosts/%s' % clusterhost_id)
+
+ def add_cluster_host(self, cluster_id, machine_id=None, name=None,
+ reinstall_os=None, raw_data=None):
+ data = {}
+ data['machine_id'] = machine_id
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_os:
+ data['reinstall_os'] = reinstall_os
+
+ return self._post('/clusters/%s/hosts' % cluster_id, data=data)
+
+ def delete_cluster_host(self, cluster_id, host_id):
+ return self._delete('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id))
+
+ def delete_clusterhost(self, clusterhost_id):
+ return self._delete('/clusterhosts/%s' % clusterhost_id)
+
+ def get_cluster_host_config(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id))
+
+ def get_clusterhost_config(self, clusterhost_id):
+ return self._get('/clusterhosts/%s/config' % clusterhost_id)
+
+ def update_cluster_host_config(self, cluster_id, host_id,
+ os_config=None,
+ package_config=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._put('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id), data=data)
+
+ def update_clusterhost_config(self, clusterhost_id, os_config=None,
+ package_config=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._put('/clusterhosts/%s/config' % clusterhost_id,
+ data=data)
+
+ def patch_cluster_host_config(self, cluster_id, host_id,
+ os_config=None,
+ package_config=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._patch('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id), data=data)
+
+ def patch_clusterhost_config(self, clusterhost_id, os_config=None,
+ package_config=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+        return self._patch('/clusterhosts/%s/config' % clusterhost_id,
+                           data=data)
+
+ def delete_cluster_host_config(self, cluster_id, host_id):
+ return self._delete('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id))
+
+ def delete_clusterhost_config(self, clusterhost_id):
+ return self._delete('/clusterhosts/%s/config' % clusterhost_id)
+
+ def get_cluster_host_state(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s/state' %
+ (cluster_id, host_id))
+
+ def update_cluster_host(self, cluster_id, host_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id), data=data)
+
+ def update_clusterhost(self, clusterhost_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusterhosts/%s' % clusterhost_id, data=data)
+
+ def patch_cluster_host(self, cluster_id, host_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._patch('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id), data=data)
+
+ def patch_clusterhost(self, clusterhost_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._patch('/clusterhosts/%s' % clusterhost_id, data=data)
+
+ def get_clusterhost_state(self, clusterhost_id):
+ return self._get('/clusterhosts/%s/state' % clusterhost_id)
+
+ def update_cluster_host_state(self, cluster_id, host_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+ return self._put('/clusters/%s/hosts/%s/state' % (cluster_id, host_id),
+ data=data)
+
+ def update_clusterhost_state(self, clusterhost_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+ return self._put('/clusterhosts/%s/state' % clusterhost_id, data=data)
+
+ def list_hosts(self, name=None, os_name=None, owner=None, mac=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if owner:
+ data['owner'] = owner
+
+ if mac:
+ data['mac'] = mac
+
+ return self._get('/hosts', data=data)
+
+ def get_host(self, host_id):
+ return self._get('/hosts/%s' % host_id)
+
+ def list_machines_or_hosts(self, mac=None, tag=None,
+ location=None, os_name=None,
+ os_id=None):
+ data = {}
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/machines-hosts', data=data)
+
+ def get_machine_or_host(self, host_id):
+ return self._get('/machines-hosts/%s' % host_id)
+
+ def update_host(self, host_id, name=None,
+ reinstall_os=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_os:
+ data['reinstall_os'] = reinstall_os
+
+ return self._put('/hosts/%s' % host_id, data=data)
+
+ def delete_host(self, host_id):
+ return self._delete('/hosts/%s' % host_id)
+
+ def get_host_clusters(self, host_id):
+ return self._get('/hosts/%s/clusters' % host_id)
+
+ def get_host_config(self, host_id):
+ return self._get('/hosts/%s/config' % host_id)
+
+ def update_host_config(self, host_id, os_config, raw_data=None):
+ data = {}
+ data['os_config'] = os_config
+ if raw_data:
+ data.update(raw_data)
+
+ return self._put('/hosts/%s/config' % host_id, data=data)
+
+ def patch_host_config(self, host_id, os_config, raw_data=None):
+ data = {}
+ data['os_config'] = os_config
+ if raw_data:
+ data.update(raw_data)
+
+ return self._patch('/hosts/%s/config' % host_id, data=data)
+
+ def delete_host_config(self, host_id):
+ return self._delete('/hosts/%s/config' % host_id)
+
+ def list_host_networks(self, host_id, interface=None, ip=None,
+ subnet=None, is_mgmt=None, is_promiscuous=None):
+ data = {}
+ if interface:
+ data['interface'] = interface
+
+ if ip:
+ data['ip'] = ip
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._get('/hosts/%s/networks' % host_id, data=data)
+
+ def list_all_host_networks(self, interface=None, ip=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None):
+ data = {}
+ if interface:
+ data['interface'] = interface
+
+ if ip:
+ data['ip'] = ip
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._get('/host-networks', data=data)
+
+ def get_host_network(self, host_id, host_network_id):
+ return self._get('/hosts/%s/networks/%s' %
+ (host_id, host_network_id))
+
+ def get_network_for_all_hosts(self, host_network_id):
+ return self._get('/host-networks/%s' % host_network_id)
+
+ def add_host_network(self, host_id, interface, ip, subnet_id,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ data['interface'] = interface
+ data['ip'] = ip
+ data['subnet_id'] = subnet_id
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._post('/hosts/%s/networks' % host_id, data=data)
+
+ def update_host_network(self, host_id, host_network_id,
+ ip=None, subnet_id=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ip:
+ data['ip'] = ip
+
+ if subnet_id:
+ data['subnet_id'] = subnet_id
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._put('/hosts/%s/networks/%s' %
+ (host_id, host_network_id), data=data)
+
+ def update_hostnetwork(self, host_network_id, ip=None,
+ subnet_id=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ip:
+ data['ip'] = ip
+
+ if subnet_id:
+ data['subnet_id'] = subnet_id
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._put('/host-networks/%s' % host_network_id,
+ data=data)
+
+ def delete_host_network(self, host_id, host_network_id):
+        return self._delete('/hosts/%s/networks/%s' %
+                            (host_id, host_network_id))
+
+ def delete_hostnetwork(self, host_network_id):
+ return self._delete('/host-networks/%s' % host_network_id)
+
+ def get_host_state(self, host_id):
+ return self._get('/hosts/%s/state' % host_id)
+
+ def update_host_state(self, host_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+        return self._put('/hosts/%s/state' % host_id, data=data)
+
+ def poweron_host(self, host_id):
+ data = {}
+ data['poweron'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def poweroff_host(self, host_id):
+ data = {}
+ data['poweroff'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def reset_host(self, host_id):
+ data = {}
+ data['reset'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def clusterhost_ready(self, clusterhost_name):
+ data = {}
+ data['ready'] = True
+
+ return self._post('/clusterhosts/%s/state_internal' %
+ clusterhost_name, data=data)
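+
+
+# A minimal usage sketch (illustrative only; the server url and credentials
+# here are assumptions, adjust them for a real deployment):
+#
+#     client = Client('http://127.0.0.1/api')
+#     status, resp = client.get_token('admin@huawei.com', 'admin')
+#     if status < 400:
+#         status, switches = client.list_switches()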
diff --git a/compass-deck/apiclient/v1/__init__.py b/compass-deck/apiclient/v1/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/apiclient/v1/__init__.py
diff --git a/compass-deck/apiclient/v1/example.py b/compass-deck/apiclient/v1/example.py
new file mode 100755
index 0000000..6f7a7f7
--- /dev/null
+++ b/compass-deck/apiclient/v1/example.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Example code to deploy a cluster by compass client api."""
+import os
+import re
+import requests
+import sys
+import time
+
+from compass.apiclient.restful import Client
+
+
+COMPASS_SERVER_URL = 'http://127.0.0.1/api'
+SWITCH_IP = '10.145.81.220'
+SWITCH_SNMP_VERSION = 'v2c'
+SWITCH_SNMP_COMMUNITY = 'public'
+# MACHINES_TO_ADD = ['00:11:20:30:40:01']
+CLUSTER_NAME = 'cluster2'
+HOST_NAME_PREFIX = 'host'
+SERVER_USERNAME = 'root'
+SERVER_PASSWORD = 'root'
+SERVICE_USERNAME = 'service'
+SERVICE_PASSWORD = 'service'
+CONSOLE_USERNAME = 'console'
+CONSOLE_PASSWORD = 'console'
+HA_VIP = ''
+# NAMESERVERS = '192.168.10.6'
+SEARCH_PATH = 'ods.com'
+# GATEWAY = '192.168.10.6'
+# PROXY = 'http://192.168.10.6:3128'
+# NTP_SERVER = '192.168.10.6'
+MANAGEMENT_IP_START = '192.168.10.130'
+MANAGEMENT_IP_END = '192.168.10.254'
+MANAGEMENT_IP_GATEWAY = '192.168.10.1'
+MANAGEMENT_NETMASK = '255.255.255.0'
+MANAGEMENT_NIC = 'eth0'
+MANAGEMENT_PROMISC = 0
+TENANT_IP_START = '192.168.10.100'
+TENANT_IP_END = '192.168.10.255'
+TENANT_IP_GATEWAY = '192.168.10.1'
+TENANT_NETMASK = '255.255.255.0'
+TENANT_NIC = 'eth0'
+TENANT_PROMISC = 0
+PUBLIC_IP_START = '12.234.32.100'
+PUBLIC_IP_END = '12.234.32.255'
+PUBLIC_IP_GATEWAY = '12.234.32.1'
+PUBLIC_NETMASK = '255.255.255.0'
+PUBLIC_NIC = 'eth1'
+PUBLIC_PROMISC = 1
+STORAGE_IP_START = '172.16.100.100'
+STORAGE_IP_END = '172.16.100.255'
+STORAGE_NETMASK = '255.255.255.0'
+STORAGE_IP_GATEWAY = '172.16.100.1'
+STORAGE_NIC = 'eth0'
+STORAGE_PROMISC = 0
+HOME_PERCENTAGE = 5
+TMP_PERCENTAGE = 5
+VAR_PERCENTAGE = 10
+# ROLES_LIST = [['os-dashboard']]
+
+PRESET_VALUES = {
+ 'NAMESERVERS': '192.168.10.1',
+ 'NTP_SERVER': '192.168.10.1',
+ 'GATEWAY': '192.168.10.1',
+ 'PROXY': 'http://192.168.10.1:3128',
+ 'ROLES_LIST': 'os-dashboard',
+ 'MACHINES_TO_ADD': '00:11:20:30:40:01',
+ 'BUILD_TIMEOUT': 60
+}
+for v in PRESET_VALUES:
+    if v in os.environ:
+        PRESET_VALUES[v] = os.environ.get(v)
+        print ('%s=%s is set by environment variables' % (v, PRESET_VALUES[v]))
+    else:
+        print ('%s=%s uses the default value' % (v, PRESET_VALUES[v]))
+
+# get apiclient object.
+client = Client(COMPASS_SERVER_URL)
+
+
+# get all switches.
+status, resp = client.get_switches()
+print 'get all switches status: %s resp: %s' % (status, resp)
+
+# add a switch.
+status, resp = client.add_switch(
+ SWITCH_IP, version=SWITCH_SNMP_VERSION,
+ community=SWITCH_SNMP_COMMUNITY)
+
+print 'add a switch status: %s resp: %s' % (status, resp)
+
+if status < 400:
+ switch = resp['switch']
+else:
+ status, resp = client.get_switches()
+ print 'get all switches status: %s resp: %s' % (status, resp)
+ switch = None
+ for switch in resp['switches']:
+ if switch['ip'] == SWITCH_IP:
+ break
+
+switch_id = switch['id']
+switch_ip = switch['ip']
+
+
+# if the switch is not under_monitoring, wait for the poll switch task to
+# update the switch information and change the switch state.
+while switch['state'] != 'under_monitoring':
+    print 'waiting for the switch to become under_monitoring'
+ status, resp = client.get_switch(switch_id)
+ print 'get switch %s status: %s, resp: %s' % (switch_id, status, resp)
+ switch = resp['switch']
+ time.sleep(10)
+
+
+# get machines connected to the switch.
+status, resp = client.get_machines(switch_id=switch_id)
+print 'get all machines under switch %s status: %s, resp: %s' % (
+ switch_id, status, resp)
+machines = {}
+MACHINES_TO_ADD = PRESET_VALUES['MACHINES_TO_ADD'].split()
+for machine in resp['machines']:
+ mac = machine['mac']
+ if mac in MACHINES_TO_ADD:
+ machines[machine['id']] = mac
+
+print 'machines to add: %s' % machines
+
+if set(machines.values()) != set(MACHINES_TO_ADD):
+ print 'only found macs %s while expected are %s' % (
+ machines.values(), MACHINES_TO_ADD)
+ sys.exit(1)
+
+
+# get adapters.
+status, resp = client.get_adapters()
+print 'get all adapters status: %s, resp: %s' % (status, resp)
+adapter_ids = []
+for adapter in resp['adapters']:
+ adapter_ids.append(adapter['id'])
+
+adapter_id = adapter_ids[0]
+print 'adapter for deploying a cluster: %s' % adapter_id
+
+
+# add a cluster.
+status, resp = client.add_cluster(
+ cluster_name=CLUSTER_NAME, adapter_id=adapter_id)
+print 'add cluster %s status: %s, resp: %s' % (CLUSTER_NAME, status, resp)
+cluster = resp['cluster']
+cluster_id = cluster['id']
+
+# add hosts to the cluster.
+status, resp = client.add_hosts(
+ cluster_id=cluster_id,
+ machine_ids=machines.keys())
+print 'add hosts to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+host_ids = []
+for host in resp['cluster_hosts']:
+ host_ids.append(host['id'])
+
+print 'added hosts: %s' % host_ids
+
+
+# set cluster security
+status, resp = client.set_security(
+ cluster_id, server_username=SERVER_USERNAME,
+ server_password=SERVER_PASSWORD,
+ service_username=SERVICE_USERNAME,
+ service_password=SERVICE_PASSWORD,
+ console_username=CONSOLE_USERNAME,
+ console_password=CONSOLE_PASSWORD)
+print 'set security config to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+
+
+# set cluster networking
+status, resp = client.set_networking(
+ cluster_id,
+ nameservers=PRESET_VALUES["NAMESERVERS"],
+ search_path=SEARCH_PATH,
+ gateway=PRESET_VALUES["GATEWAY"],
+ proxy=PRESET_VALUES["PROXY"],
+ ntp_server=PRESET_VALUES["NTP_SERVER"],
+ ha_vip=HA_VIP,
+ management_ip_start=MANAGEMENT_IP_START,
+ management_ip_end=MANAGEMENT_IP_END,
+ management_netmask=MANAGEMENT_NETMASK,
+ management_nic=MANAGEMENT_NIC,
+ management_gateway=MANAGEMENT_IP_GATEWAY,
+ management_promisc=MANAGEMENT_PROMISC,
+ tenant_ip_start=TENANT_IP_START,
+ tenant_ip_end=TENANT_IP_END,
+ tenant_netmask=TENANT_NETMASK,
+ tenant_nic=TENANT_NIC,
+ tenant_gateway=TENANT_IP_GATEWAY,
+ tenant_promisc=TENANT_PROMISC,
+ public_ip_start=PUBLIC_IP_START,
+ public_ip_end=PUBLIC_IP_END,
+ public_netmask=PUBLIC_NETMASK,
+ public_nic=PUBLIC_NIC,
+ public_gateway=PUBLIC_IP_GATEWAY,
+ public_promisc=PUBLIC_PROMISC,
+ storage_ip_start=STORAGE_IP_START,
+ storage_ip_end=STORAGE_IP_END,
+ storage_netmask=STORAGE_NETMASK,
+ storage_nic=STORAGE_NIC,
+ storage_gateway=STORAGE_IP_GATEWAY,
+ storage_promisc=STORAGE_PROMISC)
+print 'set networking config to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+
+
+# set partiton of each host in cluster
+status, resp = client.set_partition(
+ cluster_id,
+ home_percentage=HOME_PERCENTAGE,
+ tmp_percentage=TMP_PERCENTAGE,
+ var_percentage=VAR_PERCENTAGE)
+print 'set partition config to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+
+
+# set each host config in cluster.
+ROLES_LIST = [PRESET_VALUES['ROLES_LIST'].split()]
+for host_id in host_ids:
+ if ROLES_LIST:
+ roles = ROLES_LIST.pop(0)
+ else:
+ roles = []
+ status, resp = client.update_host_config(
+ host_id, hostname='%s%s' % (HOST_NAME_PREFIX, host_id),
+ roles=roles)
+ print 'set roles to host %s status: %s, resp: %s' % (
+ host_id, status, resp)
+
+
+# deploy cluster.
+status, resp = client.deploy_hosts(cluster_id)
+print 'deploy cluster %s status: %s, resp: %s' % (cluster_id, status, resp)
+
+
+# get installing progress.
+BUILD_TIMEOUT = float(PRESET_VALUES['BUILD_TIMEOUT'])
+timeout = time.time() + BUILD_TIMEOUT * 60
+while True:
+ status, resp = client.get_cluster_installing_progress(cluster_id)
+ print 'get cluster %s installing progress status: %s, resp: %s' % (
+ cluster_id, status, resp)
+ progress = resp['progress']
+ if (
+ progress['state'] not in ['UNINITIALIZED', 'INSTALLING'] or
+ progress['percentage'] >= 1.0
+ ):
+ break
+    if time.time() > timeout:
+ raise Exception("Timeout! The system is not ready in time.")
+
+ for host_id in host_ids:
+ status, resp = client.get_host_installing_progress(host_id)
+ print 'get host %s installing progress status: %s, resp: %s' % (
+ host_id, status, resp)
+
+ time.sleep(60)
+
+
+status, resp = client.get_dashboard_links(cluster_id)
+print 'get cluster %s dashboardlinks status: %s, resp: %s' % (
+ cluster_id, status, resp)
+dashboardlinks = resp['dashboardlinks']
+if not dashboardlinks.keys():
+ raise Exception("Dashboard link is not found!")
+for x in dashboardlinks.keys():
+ if x in ("os-dashboard", "os-controller"):
+ dashboardurl = dashboardlinks.get(x)
+ if dashboardurl is None:
+ raise Exception("No dashboard link is found")
+ r = requests.get(dashboardurl, verify=False)
+ r.raise_for_status()
+ match = re.search(
+ r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)', r.text)
+ if match:
+ print 'dashboard login page can be downloaded'
+ break
+ print (
+ 'dashboard login page failed to be downloaded\n'
+ 'the context is:\n%s\n') % r.text
+ raise Exception("os-dashboard is not properly installed!")
diff --git a/compass-deck/apiclient/v1/restful.py b/compass-deck/apiclient/v1/restful.py
new file mode 100644
index 0000000..3fb235c
--- /dev/null
+++ b/compass-deck/apiclient/v1/restful.py
@@ -0,0 +1,655 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass api client library.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import json
+import logging
+import requests
+
+
+class Client(object):
+ """wrapper for compass restful api.
+
+ .. note::
+ Every api client method returns (status as int, resp as dict).
+ If the api succeeds, the status is 2xx, the resp includes
+ {'status': 'OK'} and other keys depend on method.
+ If the api fails, the status is 4xx, the resp includes {
+ 'status': '...', 'message': '...'}
+ """
+
+ def __init__(self, url, headers=None, proxies=None, stream=None):
+ """Restful api client initialization.
+
+ :param url: url to the compass web service.
+ :type url: str.
+ :param headers: http header sent in each restful request.
+        :type headers: dict of header name (str) to header value (str).
+ :param proxies: the proxy address for each protocol.
+ :type proxies: dict of protocol (str) to proxy url (str).
+        :param stream: whether the restful response should be streamed.
+ :type stream: bool.
+ """
+ self.url_ = url
+ self.session_ = requests.Session()
+ if headers:
+ self.session_.headers = headers
+
+ if proxies is not None:
+ self.session_.proxies = proxies
+
+ if stream is not None:
+ self.session_.stream = stream
+
+ def __del__(self):
+ self.session_.close()
+
+ @classmethod
+ def _get_response(cls, resp):
+ """decapsulate the resp to status code and python formatted data."""
+ resp_obj = {}
+ try:
+ resp_obj = resp.json()
+ except Exception as error:
+ logging.error('failed to load object from %s: %s',
+ resp.url, resp.content)
+ logging.exception(error)
+ resp_obj['status'] = 'Json Parsing Failure'
+ resp_obj['message'] = resp.content
+
+ return resp.status_code, resp_obj
+
+ def _get(self, relative_url, params=None):
+ """encapsulate get method."""
+ url = '%s%s' % (self.url_, relative_url)
+ if params:
+ resp = self.session_.get(url, params=params)
+ else:
+ resp = self.session_.get(url)
+
+ return self._get_response(resp)
+
+ def _post(self, relative_url, data=None):
+ """encapsulate post method."""
+ url = '%s%s' % (self.url_, relative_url)
+ if data:
+ resp = self.session_.post(url, json.dumps(data))
+ else:
+ resp = self.session_.post(url)
+
+ return self._get_response(resp)
+
+ def _put(self, relative_url, data=None):
+ """encapsulate put method."""
+ url = '%s%s' % (self.url_, relative_url)
+ if data:
+ resp = self.session_.put(url, json.dumps(data))
+ else:
+ resp = self.session_.put(url)
+
+ return self._get_response(resp)
+
+ def _delete(self, relative_url):
+ """encapsulate delete method."""
+ url = '%s%s' % (self.url_, relative_url)
+ return self._get_response(self.session_.delete(url))
+
+ def get_switches(self, switch_ips=None, switch_networks=None, limit=None):
+ """List details for switches.
+
+ .. note::
+            The switches can be filtered by switch_ips, switch_networks and
+ limit. These params can be None or missing. If the param is None
+ or missing, that filter will be ignored.
+
+ :param switch_ips: Filter switch(es) with IP(s).
+ :type switch_ips: list of str. Each is as 'xxx.xxx.xxx.xxx'.
+        :param switch_networks: Filter switch(es) with network(s).
+ :type switch_networks: list of str. Each is as 'xxx.xxx.xxx.xxx/xx'.
+        :param limit: the maximum number of switches to return.
+ :type limit: int. 0 means unlimited.
+ """
+ params = {}
+ if switch_ips:
+ params['switchIp'] = switch_ips
+
+ if switch_networks:
+ params['switchIpNetwork'] = switch_networks
+
+ if limit:
+ params['limit'] = limit
+ return self._get('/switches', params=params)
+
+ def get_switch(self, switch_id):
+ """Lists details for a specified switch.
+
+ :param switch_id: switch id.
+ :type switch_id: int.
+ """
+ return self._get('/switches/%s' % switch_id)
+
+ def add_switch(self, switch_ip, version=None, community=None,
+ username=None, password=None, raw_data=None):
+ """Create a switch with specified details.
+
+ .. note::
+ It will trigger switch polling if successful. During
+ the polling, MAC address of the devices connected to the
+ switch will be learned by SNMP or SSH.
+
+ :param switch_ip: the switch IP address.
+ :type switch_ip: str, as xxx.xxx.xxx.xxx.
+ :param version: SNMP version when using SNMP to poll switch.
+ :type version: str, one in ['v1', 'v2c', 'v3']
+ :param community: SNMP community when using SNMP to poll switch.
+ :type community: str, usually 'public'.
+ :param username: SSH username when using SSH to poll switch.
+ :type username: str.
+ :param password: SSH password when using SSH to poll switch.
+ :type password: str.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['switch'] = {}
+ data['switch']['ip'] = switch_ip
+ data['switch']['credential'] = {}
+ if version:
+ data['switch']['credential']['version'] = version
+
+ if community:
+ data['switch']['credential']['community'] = community
+
+ if username:
+ data['switch']['credential']['username'] = username
+
+ if password:
+ data['switch']['credential']['password'] = password
+
+ return self._post('/switches', data=data)
+
+ def update_switch(self, switch_id, ip_addr=None,
+ version=None, community=None,
+ username=None, password=None,
+ raw_data=None):
+ """Updates a switch with specified details.
+
+ .. note::
+ It will trigger switch polling if successful. During
+ the polling, MAC address of the devices connected to the
+ switch will be learned by SNMP or SSH.
+
+ :param switch_id: switch id
+ :type switch_id: int.
+ :param ip_addr: the switch ip address.
+ :type ip_addr: str, as 'xxx.xxx.xxx.xxx' format.
+ :param version: SNMP version when using SNMP to poll switch.
+ :type version: str, one in ['v1', 'v2c', 'v3'].
+ :param community: SNMP community when using SNMP to poll switch.
+        :type community: str, usually 'public'.
+ :param username: username when using SSH to poll switch.
+ :type username: str.
+        :param password: password when using SSH to poll switch.
+        :type password: str.
+        """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['switch'] = {}
+ if ip_addr:
+ data['switch']['ip'] = ip_addr
+
+ data['switch']['credential'] = {}
+ if version:
+ data['switch']['credential']['version'] = version
+
+ if community:
+ data['switch']['credential']['community'] = community
+
+ if username:
+ data['switch']['credential']['username'] = username
+
+ if password:
+ data['switch']['credential']['password'] = password
+
+ return self._put('/switches/%s' % switch_id, data=data)
+
+ def delete_switch(self, switch_id):
+ """Not implemented in api."""
+ return self._delete('/switches/%s' % switch_id)
+
+ def get_machines(self, switch_id=None, vlan_id=None,
+ port=None, limit=None):
+ """Get the details of machines.
+
+ .. note::
+ The machines can be filtered by switch_id, vlan_id, port
+ and limit. These params can be None or missing. If the param
+ is None or missing, the filter will be ignored.
+
+ :param switch_id: Return machine(s) connected to the switch.
+ :type switch_id: int.
+ :param vlan_id: Return machine(s) belonging to the vlan.
+ :type vlan_id: int.
+ :param port: Return machine(s) connect to the port.
+ :type port: int.
+ :param limit: the maximum number of machines will be returned.
+ :type limit: int. 0 means no limit.
+ """
+ params = {}
+ if switch_id:
+ params['switchId'] = switch_id
+
+ if vlan_id:
+ params['vlanId'] = vlan_id
+
+ if port:
+ params['port'] = port
+
+ if limit:
+ params['limit'] = limit
+
+ return self._get('/machines', params=params)
+
+ def get_machine(self, machine_id):
+ """Lists the details for a specified machine.
+
+ :param machine_id: Return machine with the id.
+ :type machine_id: int.
+ """
+ return self._get('/machines/%s' % machine_id)
+
+ def get_clusters(self):
+ """Lists the details for all clusters."""
+ return self._get('/clusters')
+
+ def get_cluster(self, cluster_id):
+ """Lists the details of the specified cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ """
+ return self._get('/clusters/%d' % cluster_id)
+
+ def add_cluster(self, cluster_name, adapter_id, raw_data=None):
+        """Creates a cluster with the specified name and given adapter id.
+
+ :param cluster_name: cluster name.
+ :type cluster_name: str.
+ :param adapter_id: adapter id.
+ :type adapter_id: int.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['cluster'] = {}
+ data['cluster']['name'] = cluster_name
+ data['cluster']['adapter_id'] = adapter_id
+ return self._post('/clusters', data=data)
+
+ def add_hosts(self, cluster_id, machine_ids, raw_data=None):
+ """add the specified machine(s) as the host(s) to the cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param machine_ids: machine ids to add to cluster.
+ :type machine_ids: list of int, each is the id of one machine.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['addHosts'] = machine_ids
+ return self._post('/clusters/%d/action' % cluster_id, data=data)
+
+ def remove_hosts(self, cluster_id, host_ids, raw_data=None):
+ """remove the specified host(s) from the cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param host_ids: host ids to remove from cluster.
+ :type host_ids: list of int, each is the id of one host.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['removeHosts'] = host_ids
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def replace_hosts(self, cluster_id, machine_ids, raw_data=None):
+ """replace the cluster hosts with the specified machine(s).
+
+        :param cluster_id: the unique identifier of the cluster.
+ :type cluster_id: int.
+ :param machine_ids: the machine ids to replace the hosts in cluster.
+ :type machine_ids: list of int, each is the id of one machine.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['replaceAllHosts'] = machine_ids
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def deploy_hosts(self, cluster_id, raw_data=None):
+ """Deploy the cluster.
+
+ :param cluster_id: The unique identifier of the cluster
+ :type cluster_id: int.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['deploy'] = []
+ return self._post('/clusters/%d/action' % cluster_id, data=data)
+
+ @classmethod
+ def parse_security(cls, kwargs):
+ """parse the arguments to security data."""
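+        # For example (illustrative): {'server_username': 'root',
+        # 'server_password': 'root'} is parsed into
+        # {'server_credentials': {'username': 'root', 'password': 'root'}}.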
+ data = {}
+ for key, value in kwargs.items():
+ if '_' not in key:
+ continue
+ key_name, key_value = key.split('_', 1)
+ data.setdefault(
+ '%s_credentials' % key_name, {})[key_value] = value
+
+ return data
+
+ def set_security(self, cluster_id, **kwargs):
+ """Update the cluster security configuration.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param <security_name>_username: username of the security name.
+ :type <security_name>_username: str.
+        :param <security_name>_password: password of the security name.
+ :type <security_name>_password: str.
+
+ .. note::
+ security_name should be one of ['server', 'service', 'console'].
+ """
+ data = {}
+ data['security'] = self.parse_security(kwargs)
+ return self._put('/clusters/%d/security' % cluster_id, data=data)
+
+ @classmethod
+ def parse_networking(cls, kwargs):
+ """parse arguments to network data."""
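+        # For example (illustrative): {'gateway': '10.0.0.1',
+        # 'management_nic': 'eth0'} becomes
+        # {'global': {'gateway': '10.0.0.1'},
+        #  'interfaces': {'management': {'nic': 'eth0'}}}.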
+ data = {}
+ global_keys = [
+ 'nameservers', 'search_path', 'gateway',
+ 'proxy', 'ntp_server', 'ha_vip']
+ for key, value in kwargs.items():
+ if key in global_keys:
+ data.setdefault('global', {})[key] = value
+ else:
+ if '_' not in key:
+ continue
+
+ key_name, key_value = key.split('_', 1)
+ data.setdefault(
+ 'interfaces', {}
+ ).setdefault(
+ key_name, {}
+ )[key_value] = value
+
+ return data
+
+ def set_networking(self, cluster_id, **kwargs):
+ """Update the cluster network configuration.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+        :param nameservers: comma separated nameserver ip address.
+ :type nameservers: str.
+        :param search_path: comma separated dns name search path.
+ :type search_path: str.
+ :param gateway: gateway ip address for routing to outside.
+ :type gateway: str.
+ :param proxy: proxy url for downloading packages.
+ :type proxy: str.
+ :param ntp_server: ntp server ip address to sync timestamp.
+ :type ntp_server: str.
+ :param ha_vip: ha vip address to run ha proxy.
+ :type ha_vip: str.
+ :param <interface>_ip_start: start ip address to host's interface.
+ :type <interface>_ip_start: str.
+ :param <interface>_ip_end: end ip address to host's interface.
+ :type <interface>_ip_end: str.
+ :param <interface>_netmask: netmask to host's interface.
+ :type <interface>_netmask: str.
+ :param <interface>_nic: host physical interface name.
+ :type <interface>_nic: str.
+        :param <interface>_promisc: if the interface is in promiscuous mode.
+ :type <interface>_promisc: int, 0 or 1.
+
+ .. note::
+ interface should be one of ['management', 'tenant',
+ 'public', 'storage'].
+ """
+ data = {}
+ data['networking'] = self.parse_networking(kwargs)
+ return self._put('/clusters/%d/networking' % cluster_id, data=data)
+
+ @classmethod
+ def parse_partition(cls, kwargs):
+ """parse arguments to partition data."""
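+        # For example (illustrative): {'home_percentage': 5,
+        # 'var_mbytes': 1024} yields '/home 5%;/var 1024'
+        # (segment order follows dict iteration order).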
+ data = {}
+ for key, value in kwargs.items():
+ if key.endswith('_percentage'):
+ key_name = key[:-len('_percentage')]
+ data[key_name] = '%s%%' % value
+            elif key.endswith('_mbytes'):
+ key_name = key[:-len('_mbytes')]
+ data[key_name] = str(value)
+
+ return ';'.join([
+ '/%s %s' % (key, value) for key, value in data.items()
+ ])
+
+ def set_partition(self, cluster_id, **kwargs):
+ """Update the cluster partition configuration.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+        :param <partition>_percentage: the partition percentage.
+        :type <partition>_percentage: float between 0 and 100.
+ :param <partition>_mbytes: the partition mbytes.
+ :type <partition>_mbytes: int.
+
+ .. note::
+ partition should be one of ['home', 'var', 'tmp'].
+ """
+ data = {}
+ data['partition'] = self.parse_partition(kwargs)
+ return self._put('/clusters/%s/partition' % cluster_id, data=data)
+
+ def get_hosts(self, hostname=None, clustername=None):
+ """Lists the details of hosts.
+
+ .. note::
+ The hosts can be filtered by hostname, clustername.
+ These params can be None or missing. If the param
+ is None or missing, the filter will be ignored.
+
+ :param hostname: The name of a host.
+ :type hostname: str.
+ :param clustername: The name of a cluster.
+ :type clustername: str.
+ """
+ params = {}
+ if hostname:
+ params['hostname'] = hostname
+
+ if clustername:
+ params['clustername'] = clustername
+
+ return self._get('/clusterhosts', params=params)
+
+ def get_host(self, host_id):
+ """Lists the details for the specified host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ """
+ return self._get('/clusterhosts/%s' % host_id)
+
+ def get_host_config(self, host_id):
+ """Lists the details of the config for the specified host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ """
+ return self._get('/clusterhosts/%s/config' % host_id)
+
+ def update_host_config(self, host_id, hostname=None,
+ roles=None, raw_data=None, **kwargs):
+ """Updates config for the host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ :param hostname: host name.
+ :type hostname: str.
+ :param security_<security>_username: username of the security name.
+ :type security_<security>_username: str.
+        :param security_<security>_password: password of the security name.
+ :type security_<security>_password: str.
+        :param networking_nameservers: comma separated nameserver ip address.
+ :type networking_nameservers: str.
+        :param networking_search_path: comma separated dns name search path.
+ :type networking_search_path: str.
+ :param networking_gateway: gateway ip address for routing to outside.
+ :type networking_gateway: str.
+ :param networking_proxy: proxy url for downloading packages.
+ :type networking_proxy: str.
+ :param networking_ntp_server: ntp server ip address to sync timestamp.
+ :type networking_ntp_server: str.
+ :param networking_<interface>_ip: ip address to host interface.
+ :type networking_<interface>_ip: str.
+ :param networking_<interface>_netmask: netmask to host's interface.
+ :type networking_<interface>_netmask: str.
+ :param networking_<interface>_nic: host physical interface name.
+ :type networking_<interface>_nic: str.
+        :param networking_<interface>_promisc: if the interface is promiscuous.
+ :type networking_<interface>_promisc: int, 0 or 1.
+        :param partition_<partition>_percentage: the partition percentage.
+        :type partition_<partition>_percentage: float between 0 and 100.
+ :param partition_<partition>_mbytes: the partition mbytes.
+ :type partition_<partition>_mbytes: int.
+ :param roles: host assigned roles in the cluster.
+ :type roles: list of str.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if hostname:
+ data['hostname'] = hostname
+
+ sub_kwargs = {}
+ for key, value in kwargs.items():
+ key_name, key_value = key.split('_', 1)
+ sub_kwargs.setdefault(key_name, {})[key_value] = value
+
+ if 'security' in sub_kwargs:
+ data['security'] = self.parse_security(sub_kwargs['security'])
+
+ if 'networking' in sub_kwargs:
+ data['networking'] = self.parse_networking(
+ sub_kwargs['networking'])
+ if 'partition' in sub_kwargs:
+ data['partition'] = self.parse_partition(
+ sub_kwargs['partition'])
+
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusterhosts/%s/config' % host_id, data)
+
+ def delete_from_host_config(self, host_id, delete_key):
+ """Deletes one key in config for the host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ :param delete_key: the key in host config to be deleted.
+ :type delete_key: str.
+ """
+ return self._delete('/clusterhosts/%s/config/%s' % (
+ host_id, delete_key))
+
+ def get_adapters(self, name=None):
+ """Lists details of adapters.
+
+ .. note::
+            the adapters can be filtered by name if name is given and not None.
+
+ :param name: adapter name.
+ :type name: str.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+
+ return self._get('/adapters', params=params)
+
+ def get_adapter(self, adapter_id):
+ """Lists details for the specified adapter.
+
+ :param adapter_id: adapter id.
+ :type adapter_id: int.
+ """
+ return self._get('/adapters/%s' % adapter_id)
+
+ def get_adapter_roles(self, adapter_id):
+ """Lists roles to assign to hosts for the specified adapter.
+
+ :param adapter_id: adapter id.
+ :type adapter_id: int.
+ """
+ return self._get('/adapters/%s/roles' % adapter_id)
+
+ def get_host_installing_progress(self, host_id):
+ """Lists progress details for the specified host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ """
+ return self._get('/clusterhosts/%s/progress' % host_id)
+
+ def get_cluster_installing_progress(self, cluster_id):
+ """Lists progress details for the specified cluster.
+
+ :param cluster_id: cluster id.
+        :type cluster_id: int.
+ """
+
+ return self._get('/clusters/%s/progress' % cluster_id)
+
+ def get_dashboard_links(self, cluster_id):
+ """Lists links for dashboards of deployed cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ """
+ params = {}
+ params['cluster_id'] = cluster_id
+ return self._get('/dashboardlinks', params)
diff --git a/compass-deck/bin/README.md b/compass-deck/bin/README.md
new file mode 100644
index 0000000..7052059
--- /dev/null
+++ b/compass-deck/bin/README.md
@@ -0,0 +1,66 @@
+Compass Binaries and Scripts
+=============================
+
+bin/ contains compass's heavy-lifting utility scripts and binaries, which are called by different components: some by core python modules, others by the compass daemon and other services. Most files in `bin/` are placed under `/opt/compass/bin/` after install.sh completes; some go to `/usr/bin/` or `/etc/init.d/` as system binaries or services.
+
+###Directories and Files
+
+Below is a walkthrough of all directories and files.
+
+ * ansible_callbacks/ - contains callback scripts for the ansible installer.
+   * playbook_done.py - triggered by ansible when all playbooks have executed successfully;
+     the script then calls the compass API to report the ansible "complete" status.
+ * chef/ - utility scripts for chef installer.
+ * addcookbooks.py - upload all chef cookbooks to the chef server.
+ * adddatabags.py - (deprecated) upload all chef databags to the chef server.
+ * addroles.py - upload all chef roles to the chef server.
+ * clean_clients.sh - remove all chef clients on the chef server.
+ * clean_environments.sh - remove all chef environments on the chef server.
+ * clean_nodes.sh - remove all chef nodes on the chef server.
+ * cobbler/ - utility scripts for cobbler installer
+ * remove_systems.sh - remove all systems on the cobbler server.
+ * clean_installation_logs.py - remove all the installation logs.
+ * clean_installers.py - remove all configurations and data from all installers.
+ * client.sh - sample client script to call client.py
+ * client.py - compass python client that calls the API and deploys a cluster based on given configurations.
+ * compass_check.py - binary file that is placed as /usr/bin/compass. This is the main entry point of the compass check CLI.
+ * compassd - (deprecated) old compass daemon file
+ * compass_wsgi.py - compass wsgi module.
+ * csvdeploy.py - script that enables the deployment of clusters from spreadsheets.
+ * delete_clusters.py - script that deletes all given clusters and their underlying hosts.
+ * manage_db.py - utility binary that manages the database.
+ * poll_switch.py - utility script to poll machine mac addresses that are connected to certain switches.
+ * progress_update.py - main script run as a service to update hosts' installation progress.
+ * query_switch.py - independent script to query a switch.
+ * refresh.sh - refresh compass-db, restart compass services and clean up all installers.
+ * runserver.py - manually run a compass server instance.
+ * switch_virtualenv.py.template - template of switch_virtualenv.py. This script enables switching between python
+ virtual environments.
+
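+As a quick orientation, a minimal client.py run might look like the following
+sketch (flag values are illustrative samples, not required settings; see
+client.sh below for a complete command line):
+
+```bash
+/opt/compass/bin/client.py --machines=00:0c:29:a7:ea:4b \
+    --adapter_name=os_only --cluster_name=cluster1 \
+    --subnets=10.145.88.0/23 --hostnames=host1
+```
+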
+### Script Location and Calling Modules
+Script name | Location | Called by
+--- | --- | ---
+ansible_callbacks/playbook_done.py | /opt/compass/bin/ansible_callbacks/playbook_done.py | ***ansible-playbook***
+chef/addcookbooks.py | /opt/compass/bin/addcookbooks.py | ***install/chef.sh***
+chef/adddatabags.py(deprecated) | /opt/compass/bin/adddatabags.py | None
+chef/addroles.py | /opt/compass/bin/addroles.py | ***install/chef.sh***
+chef/clean_clients.sh | /opt/compass/bin/clean_clients.sh | ***compass.tasks.clean_package_installer***
+chef/clean_environments.sh | /opt/compass/bin/clean_environments.sh | ***compass.tasks.clean_package_installer***
+chef/clean_nodes.sh | /opt/compass/bin/clean_nodes.sh | ***compass.tasks.clean_package_installer***
+cobbler/remove_systems.sh | /opt/compass/bin/remove_systems.sh | ***compass.tasks.clean_os_installer***
+clean_installation_logs.py | /opt/compass/bin/clean_installation_logs.py | ***bin/refresh.sh***
+clean_installers.py | /opt/compass/bin/clean_installers.py | ***bin/refresh.sh***
+client.sh | /opt/compass/bin/client.sh | sample client
+client.py | /opt/compass/bin/client.py | ***regtest/regtest.sh***
+compass_check.py | /opt/compass/bin/compass_check.py | ***compass check cli***
+compassd(deprecated) | None | None
+compass_wsgi.py | /var/www/compass/compass.wsgi | ***Apache daemon***
+csvdeploy.py | /opt/compass/bin/csvdeploy.py | command-line script
+delete_clusters.py | /opt/compass/bin/delete_clusters.py | command-line script
+manage_db.py | /opt/compass/bin/manage_db.py | ***install/compass.sh*** and command-line script
+poll_switch.py | /opt/compass/bin/poll_switch.py | command-line script
+progress_update.py | /opt/compass/bin/progress_update.py | ***compass-progress-updated daemon***
+query_switch.py | /opt/compass/bin/query_switch.py | command-line script
+refresh.sh | /opt/compass/bin/refresh.sh | command-line script
+runserver.py | /opt/compass/bin/runserver.py | command-line script
+switch_virtualenv.py.template | /opt/compass/bin/switch_virtualenv.py | ***all scripts using this library***
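+
+Scripts listed as "command-line script" take `--flag=value` options defined
+through compass.utils.flags. A hypothetical cleanup run (the cluster name is
+a placeholder) could look like:
+
+```bash
+/opt/compass/bin/delete_clusters.py --clusternames=cluster1
+```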
diff --git a/compass-deck/bin/ansible_callbacks/playbook_done.py b/compass-deck/bin/ansible_callbacks/playbook_done.py
new file mode 100755
index 0000000..23d75a9
--- /dev/null
+++ b/compass-deck/bin/ansible_callbacks/playbook_done.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Ansible playbook callback after a playbook run has completed."""
+import logging
+import os
+import simplejson as json
+import sys
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir + '/..')
+
+import switch_virtualenv
+
+from compass.apiclient.restful import Client
+from compass.utils import flags
+
+flags.add('compass_server',
+ help='compass server url',
+ default='http://127.0.0.1/api')
+flags.add('compass_user_email',
+ help='compass user email',
+ default='admin@huawei.com')
+flags.add('compass_user_password',
+ help='compass user password',
+ default='admin')
+
+
+class CallbackModule(object):
+ def __init__(self):
+ self.disabled = False
+ try:
+ self.client = self._get_client()
+ except Exception:
+ self.disabled = True
+ logging.error("No compass server found"
+ "disabling this plugin")
+
+ def _get_client(self):
+ return Client(flags.OPTIONS.compass_server)
+
+ def _login(self, client):
+ """get apiclient token."""
+ status, resp = client.get_token(
+ flags.OPTIONS.compass_user_email,
+ flags.OPTIONS.compass_user_password
+ )
+ logging.info(
+ 'login status: %s, resp: %s',
+ status, resp
+ )
+ if status >= 400:
+            raise Exception(
+                'failed to login %s with user %s' % (
+                    flags.OPTIONS.compass_server,
+                    flags.OPTIONS.compass_user_email
+                )
+            )
+ return resp['token']
+
+ def playbook_on_stats(self, stats):
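+        # ansible invokes this once at the end of a playbook run; "stats"
+        # aggregates per-host results. Only report the cluster hosts as
+        # ready when no host failed or became unreachable.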
+ hosts = sorted(stats.processed.keys())
+ host_vars = self.playbook.inventory.get_variables(hosts[0])
+ cluster_name = host_vars['cluster_name']
+
+ failures = False
+ unreachable = False
+
+ for host in hosts:
+ summary = stats.summarize(host)
+
+ if summary['failures'] > 0:
+ failures = True
+ if summary['unreachable'] > 0:
+ unreachable = True
+
+ if failures or unreachable:
+ return
+
+ self._login(self.client)
+
+ for host in hosts:
+ clusterhost_name = host + "." + cluster_name
+ self.client.clusterhost_ready(clusterhost_name)
diff --git a/compass-deck/bin/chef/addcookbooks.py b/compass-deck/bin/chef/addcookbooks.py
new file mode 100755
index 0000000..f23dac4
--- /dev/null
+++ b/compass-deck/bin/chef/addcookbooks.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""import cookbooks to chef server."""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('cookbooks_dir',
+ help='chef cookbooks directory',
+ default='/var/chef/cookbooks')
+
+
+def main():
+ """main entry."""
+ flags.init()
+ logsetting.init()
+ cookbooks_dir = flags.OPTIONS.cookbooks_dir
+ logging.info('add cookbooks %s', cookbooks_dir)
+ cmd = "knife cookbook upload --all --cookbook-path %s" % cookbooks_dir
+ status = os.system(cmd)
+ logging.info('run cmd %s returns %s', cmd, status)
+ if status:
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/compass-deck/bin/chef/adddatabags.py b/compass-deck/bin/chef/adddatabags.py
new file mode 100755
index 0000000..ba2d08c
--- /dev/null
+++ b/compass-deck/bin/chef/adddatabags.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""import databags to chef server."""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('databags_dir',
+ help='chef databags directory',
+ default='/var/chef/databags')
+
+
+def main():
+ """main entry."""
+ flags.init()
+ logsetting.init()
+ databags = []
+ databags_dir = flags.OPTIONS.databags_dir
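+    # every subdirectory of databags_dir is one databag; each *.json file
+    # inside it is uploaded as a databag item via knife below.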
+ for item in os.listdir(databags_dir):
+ databags.append(item)
+
+ for databag in databags:
+ logging.info('add databag %s', databag)
+ cmd = "knife data bag create %s" % databag
+ os.system(cmd)
+ databag_items = []
+ databagitem_dir = os.path.join(databags_dir, databag)
+ for item in os.listdir(databagitem_dir):
+ if item.endswith('.json'):
+ databag_items.append(os.path.join(databagitem_dir, item))
+ else:
+ logging.info('ignore %s in %s', item, databagitem_dir)
+
+ for databag_item in databag_items:
+ logging.info('add databag item %s to databag %s',
+ databag_item, databag)
+ cmd = 'knife data bag from file %s %s' % (databag, databag_item)
+ status = os.system(cmd)
+ logging.info('run cmd %s returns %s', cmd, status)
+ if status:
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/compass-deck/bin/chef/addroles.py b/compass-deck/bin/chef/addroles.py
new file mode 100755
index 0000000..2745506
--- /dev/null
+++ b/compass-deck/bin/chef/addroles.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""script to import roles to chef server."""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('roles_dir',
+ help='chef roles directory',
+ default='/var/chef/roles')
+
+
+def main():
+ """main entry."""
+ flags.init()
+ logsetting.init()
+ rolelist = []
+ roles_dir = flags.OPTIONS.roles_dir
+
+ for item in os.listdir(roles_dir):
+ if item.endswith('.rb') or item.endswith('.json'):
+ rolelist.append(os.path.join(roles_dir, item))
+ else:
+ logging.info('ignore %s in %s', item, roles_dir)
+
+ for role in rolelist:
+ logging.info('add role %s', role)
+ cmd = "knife role from file %s" % role
+ status = os.system(cmd)
+ logging.info('run cmd %s returns %s', cmd, status)
+ if status:
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/compass-deck/bin/chef/clean_clients.sh b/compass-deck/bin/chef/clean_clients.sh
new file mode 100755
index 0000000..7a26bea
--- /dev/null
+++ b/compass-deck/bin/chef/clean_clients.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo "clean chef clients"
+yes | knife client bulk delete '^(?!chef-).*'
+if [[ "$?" != "0" ]]; then
+ echo "failed to clean all clients"
+fi
diff --git a/compass-deck/bin/chef/clean_environments.sh b/compass-deck/bin/chef/clean_environments.sh
new file mode 100755
index 0000000..f9b5052
--- /dev/null
+++ b/compass-deck/bin/chef/clean_environments.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+echo "clean chef environments"
+environments=$(knife environment list)
+for environment in $environments; do
+ if [[ "$environment" != "_default" ]]; then
+ yes | knife environment delete $environment
+ if [[ "$?" != "0" ]]; then
+ echo "failed to delete environment $environment"
+ else
+ echo "environment $environment is deleted"
+ fi
+ fi
+done
diff --git a/compass-deck/bin/chef/clean_nodes.sh b/compass-deck/bin/chef/clean_nodes.sh
new file mode 100755
index 0000000..8224b82
--- /dev/null
+++ b/compass-deck/bin/chef/clean_nodes.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo "clean chef nodes"
+yes | knife node bulk delete '.*'
+if [[ "$?" != "0" ]]; then
+ echo "failed to clean all nodes"
+fi
diff --git a/compass-deck/bin/clean_installation_logs.py b/compass-deck/bin/clean_installation_logs.py
new file mode 100755
index 0000000..0ae20f1
--- /dev/null
+++ b/compass-deck/bin/clean_installation_logs.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""clean all installation logs."""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+def clean_installation_logs():
+ installation_log_dirs = setting.INSTALLATION_LOGDIR
+ successful = True
+ for _, logdir in installation_log_dirs.items():
+ cmd = 'rm -rf %s/*' % logdir
+ status = os.system(cmd)
+        logging.info('run cmd %s returns %s', cmd, status)
+ if status:
+ successful = False
+ return successful
+
+
+if __name__ == "__main__":
+ flags.init()
+ logsetting.init()
+ clean_installation_logs()
diff --git a/compass-deck/bin/clean_installers.py b/compass-deck/bin/clean_installers.py
new file mode 100755
index 0000000..ae6dab2
--- /dev/null
+++ b/compass-deck/bin/clean_installers.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Scripts to delete cluster and it hosts"""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.actions import clean
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import database
+from compass.db.api import user as user_api
+from compass.tasks.client import celery
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+flags.add_bool('async',
+ help='run in async mode',
+ default=True)
+
+flags.add('os_installers',
+          help='comma separated os installers',
+ default='')
+flags.add('package_installers',
+ help='comma separated package installers',
+ default='')
+
+
+def clean_installers():
+ os_installers = [
+ os_installer
+ for os_installer in flags.OPTIONS.os_installers.split(',')
+ if os_installer
+ ]
+ package_installers = [
+ package_installer
+ for package_installer in flags.OPTIONS.package_installers.split(',')
+ if package_installer
+ ]
+ user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
+ adapters = adapter_api.list_adapters(user=user)
+ filtered_os_installers = {}
+ filtered_package_installers = {}
+ for adapter in adapters:
+ logging.info(
+ 'got adapter: %s', adapter
+ )
+ if 'os_installer' in adapter:
+ os_installer = adapter['os_installer']
+ os_installer_name = os_installer['alias']
+ if not os_installers or os_installer_name in os_installers:
+ filtered_os_installers[os_installer_name] = os_installer
+ else:
+ logging.info(
+ 'ignore os installer %s', os_installer_name
+ )
+ else:
+ logging.info(
+ 'cannot find os installer in adapter %s',
+ adapter['name']
+ )
+ if 'package_installer' in adapter:
+ package_installer = adapter['package_installer']
+ package_installer_name = package_installer['alias']
+ if (
+ not package_installers or
+ package_installer_name in package_installers
+ ):
+ filtered_package_installers[package_installer_name] = (
+ package_installer
+ )
+ else:
+ logging.info(
+ 'ignore package installer %s', package_installer_name
+ )
+ else:
+ logging.info(
+ 'cannot find package installer in adapter %s',
+ adapter['name']
+ )
+ logging.info(
+ 'clean os installers: %s', filtered_os_installers.keys()
+ )
+ logging.info(
+ 'clean package installers: %s', filtered_package_installers.keys()
+ )
+ if flags.OPTIONS.async:
+ for os_installer_name, os_installer in filtered_os_installers.items():
+ celery.send_task(
+ 'compass.tasks.clean_os_installer',
+ (
+ os_installer['name'],
+ os_installer['settings']
+ )
+ )
+ for package_installer_name, package_installer in (
+ filtered_package_installers.items()
+ ):
+ celery.send_task(
+ 'compass.tasks.clean_package_installer',
+ (
+ package_installer['name'],
+ package_installer['settings']
+ )
+ )
+ else:
+ for os_installer_name, os_installer in (
+ filtered_os_installers.items()
+ ):
+ try:
+ clean.clean_os_installer(
+ os_installer['name'],
+ os_installer['settings']
+ )
+ except Exception as error:
+ logging.error(
+ 'failed to clean os installer %s', os_installer_name
+ )
+ logging.exception(error)
+ for package_installer_name, package_installer in (
+ filtered_package_installers.items()
+ ):
+ try:
+ clean.clean_package_installer(
+ package_installer['name'],
+ package_installer['settings']
+ )
+ except Exception as error:
+ logging.error(
+ 'failed to clean package installer %s',
+ package_installer_name
+ )
+ logging.exception(error)
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ database.init()
+ clean_installers()
diff --git a/compass-deck/bin/client.py b/compass-deck/bin/client.py
new file mode 100755
index 0000000..d8eb59f
--- /dev/null
+++ b/compass-deck/bin/client.py
@@ -0,0 +1,1006 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""binary to deploy a cluster by compass client api."""
+from collections import defaultdict
+import itertools
+import json
+import netaddr
+import os
+import re
+import requests
+from restful import Client
+import socket
+import sys
+import time
+import yaml
+
+ROLE_UNASSIGNED = True
+ROLE_ASSIGNED = False
+
+import log as logging
+LOG = logging.getLogger(__name__)
+
+from oslo_config import cfg
+CONF = cfg.CONF
+
+
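+# Recursively convert unicode strings in decoded JSON back to utf-8 byte
+# strings (Python 2) so they compare cleanly with plain str values.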
+def byteify(input):
+ if isinstance(input, dict):
+ return dict([(byteify(key), byteify(value))
+ for key, value in input.iteritems()])
+ elif isinstance(input, list):
+ return [byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
+opts = [
+ cfg.StrOpt(
+ 'compass_server',
+ help='compass server url',
+ default='http://127.0.0.1/api'
+ ),
+ cfg.StrOpt(
+ 'compass_user_email',
+ help='compass user email',
+ default='admin@huawei.com'
+ ),
+ cfg.StrOpt(
+ 'compass_user_password',
+ help='compass user password',
+ default='admin'
+ ),
+ cfg.StrOpt(
+ 'switch_ips',
+        help='comma separated switch ips',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'switch_credential',
+ help='comma separated <credential key>=<credential value>',
+ default='version=2c,community=public'
+ ),
+ cfg.IntOpt(
+ 'switch_max_retries',
+ help='max retries of poll switch',
+ default=10
+ ),
+ cfg.IntOpt(
+ 'switch_retry_interval',
+ help='interval to repoll switch',
+ default=10
+ ),
+ cfg.BoolOpt(
+ 'poll_switches',
+ help='if the client polls switches',
+ default=True
+ ),
+ cfg.StrOpt(
+ 'machines',
+ help='comma separated mac addresses of machines',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'subnets',
+        help='comma separated subnets',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'adapter_name',
+ help='adapter name',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'adapter_os_pattern',
+ help='adapter os name',
+ default=r'^(?i)centos.*'
+ ),
+ cfg.StrOpt(
+ 'adapter_target_system_pattern',
+ help='adapter target system name',
+ default='^openstack$'
+ ),
+ cfg.StrOpt(
+ 'adapter_flavor_pattern',
+ help='adapter flavor name',
+ default='allinone'
+ ),
+ cfg.StrOpt(
+ 'cluster_name',
+ help='cluster name',
+ default='cluster1'
+ ),
+ cfg.StrOpt(
+ 'language',
+ help='language',
+ default='EN'
+ ),
+ cfg.StrOpt(
+ 'timezone',
+ help='timezone',
+ default='GMT'
+ ),
+ cfg.StrOpt(
+ 'http_proxy',
+ help='http proxy',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'https_proxy',
+ help='https proxy',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'no_proxy',
+ help='no proxy',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'ntp_server',
+ help='ntp server',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'dns_servers',
+ help='dns servers',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'domain',
+ help='domain',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'search_path',
+ help='search path',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'local_repo_url',
+ help='local repo url',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'default_gateway',
+ help='default gateway',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'server_credential',
+ help=(
+ 'server credential formatted as '
+ '<username>=<password>'
+ ),
+ default='root=root'
+ ),
+ cfg.StrOpt(
+ 'os_config_json_file',
+ help='json formatted os config file',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'service_credentials',
+ help=(
+            'comma separated service credentials formatted as '
+ '<servicename>:<username>=<password>,...'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'console_credentials',
+ help=(
+            'comma separated console credentials formatted as '
+ '<consolename>:<username>=<password>'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'hostnames',
+        help='comma separated hostnames',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'host_networks',
+ help=(
+            'semicolon separated hostnames and their networks '
+ '<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscuous>,...'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'partitions',
+ help=(
+            'comma separated partitions '
+ '<partition name>=<partition_value>'
+ ),
+ default='tmp:percentage=10%,var:percentage=30%,home:percentage=30%'
+ ),
+ cfg.StrOpt(
+ 'network_mapping',
+ help=(
+            'comma separated network mapping '
+ '<network_type>=<interface_name>'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'package_config_json_file',
+        help='json formatted package config file',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'host_roles',
+ help=(
+            'semicolon separated host roles '
+ '<hostname>=<comma separated roles>'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'default_roles',
+ help=(
+            'comma separated default roles '
+ '<rolename>'
+ ),
+ default=''
+ ),
+ cfg.IntOpt(
+ 'action_timeout',
+ help='action timeout in seconds',
+ default=60
+ ),
+ cfg.IntOpt(
+ 'deployment_timeout',
+ help='deployment timeout in minutes',
+ default=60
+ ),
+ cfg.IntOpt(
+ 'progress_update_check_interval',
+ help='progress update status check interval in seconds',
+ default=60
+ ),
+ cfg.StrOpt(
+ 'dashboard_url',
+ help='dashboard url',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'dashboard_link_pattern',
+ help='dashboard link pattern',
+ default=r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)'
+ ),
+ cfg.StrOpt(
+ 'cluster_vip',
+ help='cluster ip address',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'enable_secgroup',
+ help='enable security group',
+ default='true'
+ ),
+ cfg.StrOpt(
+ 'network_cfg',
+        help='network config file',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'neutron_cfg',
+        help='neutron config file',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'cluster_pub_vip',
+ help='cluster ip address',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'cluster_prv_vip',
+ help='cluster ip address',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'repo_name',
+ help='repo name',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'deploy_type',
+ help='deploy type',
+ default='virtual'
+ ),
+]
+CONF.register_cli_opts(opts)
+
+
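+# role_mapping values start as ROLE_UNASSIGNED (True) and flip to
+# ROLE_ASSIGNED (False) once a host takes the role, so a truthy entry
+# means the role still needs a host.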
+def is_role_unassigned(role):
+ return role
+
+
+def _load_config(config_filename):
+ if not config_filename:
+ return {}
+ with open(config_filename) as config_file:
+ content = config_file.read()
+ return json.loads(content)
+
+
+class CompassClient(object):
+ def __init__(self):
+ LOG.info("xh: compass_server=%s" % CONF.compass_server)
+ self.client = Client(CONF.compass_server)
+ self.subnet_mapping = {}
+ self.role_mapping = {}
+ self.host_mapping = {}
+ self.host_ips = defaultdict(list)
+ self.host_roles = {}
+
+ self.login()
+
+    def is_ok(self, status):
+        return 200 <= status < 300
+
+ def login(self):
+ status, resp = self.client.get_token(
+ CONF.compass_user_email,
+ CONF.compass_user_password
+ )
+
+ LOG.info(
+ 'login status: %s, resp: %s',
+ status, resp
+ )
+ if self.is_ok(status):
+ return resp["token"]
+ else:
+            raise Exception(
+                'failed to login %s with user %s' % (
+                    CONF.compass_server,
+                    CONF.compass_user_email
+                )
+            )
+
+ def get_machines(self):
+ status, resp = self.client.list_machines()
+ LOG.info(
+ 'get all machines status: %s, resp: %s', status, resp)
+ if not self.is_ok(status):
+ raise RuntimeError('failed to get machines')
+
+ machines_to_add = list(set([
+ machine for machine in CONF.machines.split(',')
+ if machine
+ ]))
+
+ LOG.info('machines to add: %s', machines_to_add)
+ machines_db = [str(m["mac"]) for m in resp]
+ LOG.info('machines in db: %s', machines_db)
+ assert(set(machines_db) == set(machines_to_add))
+
+ return [m["id"] for m in resp]
+
+ def get_adapter(self):
+ """get adapter."""
+ status, resp = self.client.list_adapters(name=CONF.adapter_name)
+ LOG.info(
+ 'get all adapters status: %s, resp: %s',
+ status, resp
+ )
+
+ if not self.is_ok(status) or not resp:
+ raise RuntimeError('failed to get adapters')
+
+ os_re = re.compile(CONF.adapter_os_pattern)
+ flavor_re = re.compile(CONF.adapter_flavor_pattern)
+
+ adapter_id = None
+ os_id = None
+ flavor_id = None
+ adapter = None
+
+ adapter = resp[0]
+ adapter_id = adapter['id']
+ for supported_os in adapter['supported_oses']:
+ if not os_re or os_re.match(supported_os['name']):
+ os_id = supported_os['os_id']
+ break
+
+ if 'flavors' in adapter:
+ for flavor in adapter['flavors']:
+ if not flavor_re or flavor_re.match(flavor['name']):
+ flavor_id = flavor['id']
+ break
+
+ assert(os_id and flavor_id)
+ return (adapter_id, os_id, flavor_id)
+
+ def add_subnets(self):
+ subnets = [
+ subnet for subnet in CONF.subnets.split(',')
+ if subnet
+ ]
+
+ assert(subnets)
+
+ subnet_mapping = {}
+ for subnet in subnets:
+ try:
+ netaddr.IPNetwork(subnet)
+ except Exception:
+ raise RuntimeError('subnet %s format is invalid' % subnet)
+
+ status, resp = self.client.add_subnet(subnet)
+ LOG.info(
+ 'add subnet %s status %s response %s',
+ subnet,
+ status,
+ resp
+ )
+ if not self.is_ok(status):
+ raise RuntimeError('failed to add subnet %s' % subnet)
+
+ subnet_mapping[resp['subnet']] = resp['id']
+
+ self.subnet_mapping = subnet_mapping
+
+ def add_cluster(self, adapter_id, os_id, flavor_id):
+ """add a cluster."""
+ cluster_name = CONF.cluster_name
+ assert(cluster_name)
+ status, resp = self.client.add_cluster(
+ cluster_name, adapter_id,
+ os_id, flavor_id)
+
+ if not self.is_ok(status):
+ raise RuntimeError("add cluster failed")
+
+ LOG.info(
+ 'add cluster %s status: %s resp:%s',
+ cluster_name,
+ status,
+ resp
+ )
+
+ if isinstance(resp, list):
+ cluster = resp[0]
+ else:
+ cluster = resp
+
+ cluster_id = cluster['id']
+ flavor = cluster.get('flavor', {})
+ roles = flavor.get('roles', [])
+
+ for role in roles:
+ if role.get('optional', False):
+ self.role_mapping[role['name']] = ROLE_ASSIGNED
+ else:
+ self.role_mapping[role['name']] = ROLE_UNASSIGNED
+
+ return cluster_id
+
+ def add_cluster_hosts(self, cluster_id, machines):
+ hostnames = [
+ hostname for hostname in CONF.hostnames.split(',')
+ if hostname
+ ]
+
+ assert(len(machines) == len(hostnames))
+
+ machines_dict = []
+ for machine_id, hostname in zip(machines, hostnames):
+ machines_dict.append({
+ 'machine_id': machine_id,
+ 'name': hostname
+ })
+
+ # add hosts to the cluster.
+ status, resp = self.client.add_hosts_to_cluster(
+ cluster_id,
+ {'machines': machines_dict})
+
+ LOG.info(
+ 'add machines %s to cluster %s status: %s, resp: %s',
+ machines_dict,
+ cluster_id,
+ status,
+ resp
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("add host to cluster failed")
+
+ for host in resp['hosts']:
+ self.host_mapping[host['hostname']] = host['id']
+
+ assert(len(self.host_mapping) == len(machines))
+
+ def set_cluster_os_config(self, cluster_id):
+ """set cluster os config."""
+ os_config = {}
+ language = CONF.language
+ timezone = CONF.timezone
+ http_proxy = CONF.http_proxy
+ https_proxy = CONF.https_proxy
+ local_repo_url = CONF.local_repo_url
+ repo_name = CONF.repo_name
+ deploy_type = CONF.deploy_type
+ if not https_proxy and http_proxy:
+ https_proxy = http_proxy
+
+ no_proxy = [
+ no_proxy for no_proxy in CONF.no_proxy.split(',')
+ if no_proxy
+ ]
+
+ compass_server = CONF.compass_server
+ if http_proxy:
+ for hostname, ips in self.host_ips.items():
+ no_proxy.append(hostname)
+ no_proxy.extend(ips)
+
+ ntp_server = CONF.ntp_server or compass_server
+
+ dns_servers = [
+ dns_server for dns_server in CONF.dns_servers.split(',')
+ if dns_server
+ ]
+ if not dns_servers:
+ dns_servers = [compass_server]
+
+ domain = CONF.domain
+ if not domain:
+ raise Exception('domain is not defined')
+
+ search_path = [
+ search_path for search_path in CONF.search_path.split(',')
+ if search_path
+ ]
+
+ if not search_path:
+ search_path = [domain]
+
+ default_gateway = CONF.default_gateway
+ if not default_gateway:
+ raise Exception('default gateway is not defined')
+
+ general_config = {
+ 'language': language,
+ 'timezone': timezone,
+ 'ntp_server': ntp_server,
+ 'dns_servers': dns_servers,
+ 'default_gateway': default_gateway
+ }
+
+ if http_proxy:
+ general_config['http_proxy'] = http_proxy
+ if https_proxy:
+ general_config['https_proxy'] = https_proxy
+ if no_proxy:
+ general_config['no_proxy'] = no_proxy
+ if domain:
+ general_config['domain'] = domain
+ if search_path:
+ general_config['search_path'] = search_path
+ if local_repo_url:
+ general_config['local_repo'] = local_repo_url
+ if repo_name:
+ general_config['repo_name'] = repo_name
+ if deploy_type:
+ general_config['deploy_type'] = deploy_type
+
+ os_config["general"] = general_config
+
+ server_credential = CONF.server_credential
+ if '=' in server_credential:
+ server_username, server_password = server_credential.split('=', 1)
+ elif server_credential:
+ server_username = server_password = server_credential
+ else:
+ server_username = 'root'
+ server_password = 'root'
+
+ os_config['server_credentials'] = {
+ 'username': server_username,
+ 'password': server_password
+ }
+
+ partitions = [
+ partition for partition in CONF.partitions.split(',')
+ if partition
+ ]
+
+ partition_config = {}
+ for partition in partitions:
+ assert("=" in partition)
+
+ partition_name, partition_value = partition.split('=', 1)
+ partition_name = partition_name.strip()
+ partition_value = partition_value.strip()
+
+ assert(partition_name and partition_value)
+
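+            # e.g. "/var=50%" yields {'/var': {'percentage': 50}}; values
+            # without a trailing '%' are stored under 'size' instead.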
+ if partition_value.endswith('%'):
+ partition_type = 'percentage'
+ partition_value = int(partition_value[:-1])
+ else:
+ partition_type = 'size'
+
+ partition_config[partition_name] = {
+ partition_type: partition_value
+ }
+
+ os_config['partition'] = partition_config
+
+ """
+ os_config_filename = CONF.os_config_json_file
+ if os_config_filename:
+ util.merge_dict(
+ os_config, _load_config(os_config_filename)
+ )
+ """
+
+ status, resp = self.client.update_cluster_config(
+ cluster_id, os_config=os_config)
+ LOG.info(
+ 'set os config %s to cluster %s status: %s, resp: %s',
+ os_config, cluster_id, status, resp)
+ if not self.is_ok(status):
+ raise RuntimeError('failed to set os config %s to cluster %s'
+ % (os_config, cluster_id))
+
+ def set_host_networking(self):
+ """set cluster hosts networking."""
+ def get_subnet(ip_str):
+ try:
+ LOG.info("subnets: %s" % self.subnet_mapping.keys())
+ ip = netaddr.IPAddress(ip_str)
+ for cidr, subnet_id in self.subnet_mapping.items():
+ subnet = netaddr.IPNetwork(cidr)
+ if ip in subnet:
+ return True, subnet_id
+
+ LOG.info("ip %s not in %s" % (ip_str, cidr))
+ return False, None
+ except Exception:
+ LOG.exception("ip addr %s is invalid" % ip_str)
+ return False, None
+
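+        # host_networks format (see client.sh for a full sample):
+        #   host1:eth0=10.145.89.201|is_mgmt,eth1=172.16.100.201|is_promiscuous
+        # hosts are separated by ';', interfaces by ',', and every property
+        # listed after the ip is set to True on that interface.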
+ for host_network in CONF.host_networks.split(';'):
+ hostname, networks_str = host_network.split(':', 1)
+ hostname = hostname.strip()
+ networks_str = networks_str.strip()
+
+ assert(hostname in self.host_mapping)
+
+ host_id = self.host_mapping[hostname]
+ intf_list = networks_str.split(',')
+ for intf_str in intf_list:
+ interface, intf_properties = intf_str.split('=', 1)
+ intf_properties = intf_properties.strip().split('|')
+
+ assert(intf_properties)
+ ip_str = intf_properties[0]
+
+ status, subnet_id = get_subnet(ip_str)
+ if not status:
+ raise RuntimeError("ip addr %s is invalid" % ip_str)
+
+ properties = dict([
+ (intf_property, True)
+ for intf_property in intf_properties[1:]
+ ])
+
+ LOG.info(
+                'add host %s interface %s ip %s network properties %s',
+ hostname, interface, ip_str, properties)
+
+ status, response = self.client.add_host_network(
+ host_id, interface, ip=ip_str, subnet_id=subnet_id,
+ **properties
+ )
+
+ LOG.info(
+ 'add host %s interface %s ip %s network properties %s '
+ 'status %s: %s',
+ hostname, interface, ip_str, properties,
+ status, response
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("add host network failed")
+
+ self.host_ips[hostname].append(ip_str)
+
+ def set_cluster_package_config(self, cluster_id):
+ """set cluster package config."""
+ package_config = {"security": {}}
+
+ service_credentials = [
+ service_credential
+ for service_credential in CONF.service_credentials.split(',')
+ if service_credential
+ ]
+
+ service_credential_cfg = {}
+ LOG.info(
+ 'service credentials: %s', service_credentials
+ )
+
+ for service_credential in service_credentials:
+ if ':' not in service_credential:
+ raise Exception(
+ 'no : in service credential %s' % service_credential
+ )
+ service_name, service_pair = service_credential.split(':', 1)
+ if '=' not in service_pair:
+ raise Exception(
+ 'there is no = in service %s security' % service_name
+ )
+
+ username, password = service_pair.split('=', 1)
+ service_credential_cfg[service_name] = {
+ 'username': username,
+ 'password': password
+ }
+
+ console_credentials = [
+ console_credential
+ for console_credential in CONF.console_credentials.split(',')
+ if console_credential
+ ]
+
+ LOG.info(
+ 'console credentials: %s', console_credentials
+ )
+
+ console_credential_cfg = {}
+ for console_credential in console_credentials:
+ if ':' not in console_credential:
+ raise Exception(
+ 'there is no : in console credential %s'
+ % console_credential
+ )
+ console_name, console_pair = console_credential.split(':', 1)
+ if '=' not in console_pair:
+ raise Exception(
+ 'there is no = in console %s security' % console_name
+ )
+ username, password = console_pair.split('=', 1)
+ console_credential_cfg[console_name] = {
+ 'username': username,
+ 'password': password
+ }
+
+ package_config["security"] = {
+ "service_credentials": service_credential_cfg,
+ "console_credentials": console_credential_cfg
+ }
+
+ network_mapping = dict([
+ network_pair.split('=', 1)
+ for network_pair in CONF.network_mapping.split(',')
+ if '=' in network_pair
+ ])
+
+ package_config['network_mapping'] = network_mapping
+
+ assert(os.path.exists(CONF.network_cfg))
+ network_cfg = yaml.load(open(CONF.network_cfg))
+ package_config["network_cfg"] = network_cfg
+
+ assert(os.path.exists(CONF.neutron_cfg))
+ neutron_cfg = yaml.load(open(CONF.neutron_cfg))
+ package_config["neutron_config"] = neutron_cfg
+
+ """
+ package_config_filename = CONF.package_config_json_file
+ if package_config_filename:
+ util.merge_dict(
+ package_config, _load_config(package_config_filename)
+ )
+ """
+ package_config['ha_proxy'] = {}
+ if CONF.cluster_vip:
+ package_config["ha_proxy"]["vip"] = CONF.cluster_vip
+
+ package_config['enable_secgroup'] = (CONF.enable_secgroup == "true")
+
+ status, resp = self.client.update_cluster_config(
+ cluster_id, package_config=package_config)
+ LOG.info(
+ 'set package config %s to cluster %s status: %s, resp: %s',
+ package_config, cluster_id, status, resp)
+
+ if not self.is_ok(status):
+ raise RuntimeError("set cluster package_config failed")
+
+ def set_host_roles(self, cluster_id, host_id, roles):
+ status, response = self.client.update_cluster_host(
+ cluster_id, host_id, roles=roles)
+
+ LOG.info(
+ 'set cluster %s host %s roles %s status %s: %s',
+ cluster_id, host_id, roles, status, response
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("set host roles failed")
+
+ for role in roles:
+ if role in self.role_mapping:
+ self.role_mapping[role] = ROLE_ASSIGNED
+
+ def set_all_hosts_roles(self, cluster_id):
+ for host_str in CONF.host_roles.split(';'):
+ host_str = host_str.strip()
+ hostname, roles_str = host_str.split('=', 1)
+
+ assert(hostname in self.host_mapping)
+ host_id = self.host_mapping[hostname]
+
+ roles = [role.strip() for role in roles_str.split(',') if role]
+
+ self.set_host_roles(cluster_id, host_id, roles)
+ self.host_roles[hostname] = roles
+
+ unassigned_hostnames = list(set(self.host_mapping.keys())
+ - set(self.host_roles.keys()))
+
+ unassigned_roles = [role for role, status in self.role_mapping.items()
+ if is_role_unassigned(status)]
+
+ assert(len(unassigned_hostnames) >= len(unassigned_roles))
+
+ for hostname, role in map(
+ None,
+ unassigned_hostnames,
+ unassigned_roles
+ ):
+ host_id = self.host_mapping[hostname]
+ self.set_host_roles(cluster_id, host_id, [role])
+ self.host_roles[hostname] = [role]
+
+ unassigned_hostnames = list(set(self.host_mapping.keys())
+ - set(self.host_roles.keys()))
+
+ if not unassigned_hostnames:
+ return
+
+ # assign default roles to unassigned hosts
+ default_roles = [
+ role for role in CONF.default_roles.split(',')
+ if role
+ ]
+
+ assert(default_roles)
+
+ cycle_roles = itertools.cycle(default_roles)
+ for hostname in unassigned_hostnames:
+ host_id = self.host_mapping[hostname]
+ roles = [cycle_roles.next()]
+ self.set_host_roles(cluster_id, host_id, roles)
+ self.host_roles[hostname] = roles
+
+ def deploy_clusters(self, cluster_id):
+ host_ids = self.host_mapping.values()
+
+ status, response = self.client.review_cluster(
+ cluster_id, review={'hosts': host_ids}
+ )
+ LOG.info(
+ 'review cluster %s hosts %s, status %s: %s',
+ cluster_id, host_ids, status, response
+ )
+
+        # TODO: clarify what the review step checks before deployment.
+ if not self.is_ok(status):
+ raise RuntimeError("review cluster host failed")
+
+ status, response = self.client.deploy_cluster(
+ cluster_id, deploy={'hosts': host_ids}
+ )
+ LOG.info(
+ 'deploy cluster %s hosts %s status %s: %s',
+ cluster_id, host_ids, status, response
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("deploy cluster failed")
+
+ def get_installing_progress(self, cluster_id):
+ """get intalling progress."""
+ action_timeout = time.time() + 60 * float(CONF.action_timeout)
+ deployment_timeout = time.time() + 60 * float(
+ CONF.deployment_timeout)
+
+        deployment_failed = True
+        while time.time() < deployment_timeout:
+ status, cluster_state = self.client.get_cluster_state(cluster_id)
+ LOG.info(
+ 'get cluster %s state status %s: %s',
+ cluster_id, status, cluster_state
+ )
+ if not self.is_ok(status):
+ raise RuntimeError("can not get cluster state")
+
+            if cluster_state['state'] in ['UNINITIALIZED', 'INITIALIZED']:
+                if time.time() >= action_timeout:
+                    deployment_failed = True
+                    break
+            elif cluster_state['state'] == 'SUCCESSFUL':
+                deployment_failed = False
+                break
+            elif cluster_state['state'] == 'ERROR':
+                deployment_failed = True
+                break
+
+            # still pending or installing: wait before polling the state again
+            time.sleep(CONF.progress_update_check_interval)
+
+ if deployment_failed:
+ raise RuntimeError("deploy cluster failed")
+
+ def check_dashboard_links(self, cluster_id):
+ dashboard_url = CONF.dashboard_url
+ if not dashboard_url:
+            LOG.info('no dashboard url set')
+ return
+ dashboard_link_pattern = re.compile(
+ CONF.dashboard_link_pattern)
+ r = requests.get(dashboard_url, verify=False)
+ r.raise_for_status()
+ match = dashboard_link_pattern.search(r.text)
+ if match:
+ LOG.info(
+ 'dashboard login page for cluster %s can be downloaded',
+ cluster_id)
+ else:
+ msg = (
+ '%s failed to be downloaded\n'
+ 'the context is:\n%s\n'
+ ) % (dashboard_url, r.text)
+ raise Exception(msg)
+
+
+def main():
+ client = CompassClient()
+ machines = client.get_machines()
+
+ LOG.info('machines are %s', machines)
+
+ client.add_subnets()
+ adapter_id, os_id, flavor_id = client.get_adapter()
+ cluster_id = client.add_cluster(adapter_id, os_id, flavor_id)
+
+ client.add_cluster_hosts(cluster_id, machines)
+ client.set_host_networking()
+ client.set_cluster_os_config(cluster_id)
+
+ if flavor_id:
+ client.set_cluster_package_config(cluster_id)
+
+ client.set_all_hosts_roles(cluster_id)
+ client.deploy_clusters(cluster_id)
+
+ client.get_installing_progress(cluster_id)
+ client.check_dashboard_links(cluster_id)
+
+if __name__ == "__main__":
+ CONF(args=sys.argv[1:])
+ main()
diff --git a/compass-deck/bin/client.sh b/compass-deck/bin/client.sh
new file mode 100755
index 0000000..48c70e2
--- /dev/null
+++ b/compass-deck/bin/client.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+/opt/compass/bin/client.py --switch_ips=172.29.8.40 --machines=00:0c:29:a7:ea:4b --adapter_name=os_only --adapter_flavor_pattern= --subnets=10.145.88.0/23,172.16.0.0/16 --cluster_name=cluster1 --domain=ods.com --default_gateway=10.145.88.1 --service_credentials= --console_credentials= --hostnames=host1 --host_networks="host1:eth0=10.145.89.201|is_mgmt,eth1=172.16.100.201|is_promiscuous" --partitions="/var=50%,/home=30%" --network_mapping= --host_roles= --dashboard_url=
diff --git a/compass-deck/bin/cobbler/remove_systems.sh b/compass-deck/bin/cobbler/remove_systems.sh
new file mode 100755
index 0000000..1973d43
--- /dev/null
+++ b/compass-deck/bin/cobbler/remove_systems.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+systems=$(cobbler system list)
+echo "remove systems: $systems"
+for system in $systems; do
+ cobbler system remove --name=$system
+ if [[ "$?" != "0" ]]; then
+ echo "failed to remove system %s"
+ fi
+done
diff --git a/compass-deck/bin/compass_check.py b/compass-deck/bin/compass_check.py
new file mode 100755
index 0000000..5fc7e69
--- /dev/null
+++ b/compass-deck/bin/compass_check.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""compass health check."""
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+import compass.actions.cli as cli
+
+sys.exit(cli.main())
diff --git a/compass-deck/bin/compass_wsgi.py b/compass-deck/bin/compass_wsgi.py
new file mode 100755
index 0000000..9e889e7
--- /dev/null
+++ b/compass-deck/bin/compass_wsgi.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""compass wsgi module."""
+import os
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+flags.init()
+flags.OPTIONS.logfile = setting.WEB_LOGFILE
+logsetting.init()
+
+
+from compass.api import api as compass_api
+
+
+compass_api.init()
+application = compass_api.app
diff --git a/compass-deck/bin/compassd b/compass-deck/bin/compassd
new file mode 100755
index 0000000..fc77bb9
--- /dev/null
+++ b/compass-deck/bin/compassd
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+RETVAL_CELERY=0
+RETVAL_PROGRESS_UPDATE=0
+start() {
+ service compass-celeryd start
+ RETVAL_CELERY=$?
+ service compass-progress-updated start
+ RETVAL_PROGRESS_UPDATE=$?
+}
+
+stop() {
+ service compass-celeryd stop
+ RETVAL_CELERY=$?
+ service compass-progress-updated stop
+ RETVAL_PROGRESS_UPDATE=$?
+}
+
+restart() {
+ stop
+ start
+}
+case "$1" in
+ start|stop|restart)
+ $1
+ ;;
+ status)
+ service compass-celeryd status
+ RETVAL_CELERY=$?
+ service compass-progress-updated status
+ RETVAL_PROGRESS_UPDATE=$?
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status|restart}"
+ exit 1
+ ;;
+esac
+if [[ "$RETVAL_CELERY" != "0" ]]; then
+ exit $RETVAL_CELERY
+fi
+if [[ "$RETVAL_PROGRESS_UPDATE" != "0" ]]; then
+ exit $RETVAL_PROGRESS_UPDATE
+fi
diff --git a/compass-deck/bin/csvdeploy.py b/compass-deck/bin/csvdeploy.py
new file mode 100755
index 0000000..23b0c46
--- /dev/null
+++ b/compass-deck/bin/csvdeploy.py
@@ -0,0 +1,333 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""deploy cluster from csv file."""
+import ast
+import copy
+import csv
+import os
+import re
+import sys
+
+from multiprocessing import Process
+from multiprocessing import Queue
+from optparse import OptionParser
+
+try:
+ from compass.apiclient.restful import Client
+except ImportError:
+ curr_dir = os.path.dirname(os.path.realpath(__file__))
+ apiclient_dir = os.path.dirname(curr_dir) + '/compass/apiclient'
+ sys.path.append(apiclient_dir)
+ from restful import Client
+
+
+DELIMITER = ","
+
+# Sqlite tables
+TABLES = {
+ 'switch_config': {'columns': ['id', 'ip', 'filter_port']},
+ 'switch': {'columns': ['id', 'ip', 'credential_data']},
+ 'machine': {'columns': ['id', 'mac', 'port', 'vlan', 'switch_id']},
+ 'cluster': {'columns': ['id', 'name', 'security_config',
+ 'networking_config', 'partition_config',
+ 'adapter_id', 'state']},
+ 'cluster_host': {'columns': ['id', 'cluster_id', 'hostname', 'machine_id',
+ 'config_data', 'state']},
+ 'adapter': {'columns': ['id', 'name', 'os', 'target_system']},
+ 'role': {'columns': ['id', 'name', 'target_system', 'description']}
+}
+
+
+def start(csv_dir, compass_url):
+ """Start deploy both failed clusters and new clusters."""
+ # Get clusters and hosts data from CSV
+ clusters_data = get_csv('cluster.csv', csv_dir)
+ hosts_data = get_csv('cluster_host.csv', csv_dir)
+ data = {}
+ for cluster in clusters_data:
+ tmp = {}
+ tmp['cluster_data'] = cluster
+ tmp['hosts_data'] = []
+ data[cluster['id']] = tmp
+
+ for host in hosts_data:
+ cluster_id = host['cluster_id']
+ if cluster_id not in data:
+ print ("Unknown cluster_id=%s of the host! host_id=%s!"
+ % (cluster_id, host['id']))
+ sys.exit(1)
+
+ data[cluster_id]['hosts_data'].append(host)
+
+ apiClient = _APIClient(compass_url)
+ results_q = Queue()
+ ps = []
+ for elem in data:
+ cluster_data = data[elem]['cluster_data']
+ hosts_data = data[elem]['hosts_data']
+ p = Process(target=apiClient.execute,
+ args=(cluster_data, hosts_data, results_q))
+ ps.append(p)
+ p.start()
+
+ for p in ps:
+ p.join()
+
+ progress_file = '/'.join((csv_dir, 'progress.csv'))
+ write_progress_to_file(results_q, progress_file)
+
+
+def write_progress_to_file(results_q, progress_file):
+ cluster_headers = ['cluster_id', 'progress_url']
+ host_headers = ['host_id', 'progress_url']
+
+ with open(progress_file, 'wb') as f:
+ print "Writing all progress information to %s......" % progress_file
+ writer = csv.writer(f, delimiter=DELIMITER, quoting=csv.QUOTE_MINIMAL)
+ while not results_q.empty():
+ record = results_q.get()
+ hosts = []
+ cluster = [record['deployment']['cluster']['cluster_id'],
+ record['deployment']['cluster']['url']]
+ writer.writerow(cluster_headers)
+ writer.writerow(cluster)
+
+ for elem in record['deployment']['hosts']:
+ host = [elem['host_id'], elem['url']]
+ hosts.append(host)
+
+ writer.writerow(host_headers)
+ writer.writerows(hosts)
+ print "Done!\n"
+
+
+def get_csv(fname, csv_dir):
+ """Parse csv files into python variables.
+
+ .. note::
+ all nested fields in db will be assembled.
+
+ :param fname: CSV file name
+ :param csv_dir: CSV files directory
+
+ :returns: list of dict which key is column name and value is its data.
+ """
+ headers = []
+ rows = []
+ file_dir = '/'.join((csv_dir, fname))
+ with open(file_dir) as f:
+ reader = csv.reader(f, delimiter=DELIMITER, quoting=csv.QUOTE_MINIMAL)
+ headers = reader.next()
+ rows = [x for x in reader]
+
+ result = []
+ for row in rows:
+ data = {}
+ for col_name, value in zip(headers, row):
+ if re.match(r'^[\d]+$', value):
+ # the value should be an integer
+ value = int(value)
+ elif re.match(r'^\[(\'\w*\'){1}(\s*,\s*\'\w*\')*\]$', value):
+ # the value should be a list
+ value = ast.literal_eval(value)
+ elif value == 'None':
+ value = ''
+
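+            # nested columns use dots: a header "config.a.b" with value v is
+            # assembled into {'config': {'a': {'b': v}}} and merged into data.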
+ if col_name.find('.') > 0:
+ tmp_result = {}
+ tmp_result[col_name.split('.')[-1]] = value
+ keys = col_name.split('.')[::-1][1:]
+ for key in keys:
+ tmp = {}
+ tmp[key] = tmp_result
+ tmp_result = tmp
+ merge_dict(data, tmp_result)
+ else:
+ data[col_name] = value
+
+ result.append(data)
+
+ return result
+
+
+def merge_dict(lhs, rhs, override=True):
+ """Merge nested right dict into left nested dict recursively.
+
+ :param lhs: dict to be merged into.
+ :type lhs: dict
+ :param rhs: dict to merge from.
+ :type rhs: dict
+    :param override: the value in rhs overrides the value in lhs if True.
+    :type override: bool
+
+ :raises: TypeError if lhs or rhs is not a dict.
+ """
+ if not rhs:
+ return
+
+ if not isinstance(lhs, dict):
+ raise TypeError('lhs type is %s while expected is dict' % type(lhs),
+ lhs)
+
+ if not isinstance(rhs, dict):
+ raise TypeError('rhs type is %s while expected is dict' % type(rhs),
+ rhs)
+
+ for key, value in rhs.items():
+ if isinstance(value, dict) and key in lhs and isinstance(lhs[key],
+ dict):
+ merge_dict(lhs[key], value, override)
+ else:
+ if override or key not in lhs:
+ lhs[key] = copy.deepcopy(value)
+
+
+class _APIClient(Client):
+ def __init__(self, url, headers=None, proxies=None, stream=None):
+ super(_APIClient, self).__init__(url, headers, proxies, stream)
+
+ def set_cluster_resource(self, cluster_id, resource, data):
+ url = "/clusters/%d/%s" % (cluster_id, resource)
+ return self._put(url, data=data)
+
+ def execute(self, cluster_data, hosts_data, resp_results):
+ """The process includes creating or updating a cluster.
+
+ The cluster configuration, add or update a host in the cluster,
+ and deploy the updated hosts.
+
+ :param cluster_data: the dictionary of cluster data
+ """
+ cluster_id = cluster_data['id']
+ code, resp = self.get_cluster(cluster_id)
+ if code == 404:
+ # Create a new cluster
+ name = cluster_data['name']
+ adapter_id = cluster_data['adapter_id']
+ code, resp = self.add_cluster(name, adapter_id)
+
+ if code != 200:
+ print ("Failed to create the cluster which name is "
+ "%s!\nError message: %s" % (name, resp['message']))
+ sys.exit(1)
+
+ # Update the config(security, networking, partition) of the cluster
+ security_req = {}
+ networking_req = {}
+ partition_req = {}
+
+ security_req['security'] = cluster_data['security_config']
+ networking_req['networking'] = cluster_data['networking_config']
+ partition_req['partition'] = cluster_data['partition_config']
+
+ print "Update Security config......."
+ code, resp = self.set_cluster_resource(cluster_id, 'security',
+ security_req)
+ if code != 200:
+ print ("Failed to update Security config for cluster id=%s!\n"
+ "Error message: " % (cluster_id, resp['message']))
+ sys.exit(1)
+
+ print "Update Networking config......."
+ code, resp = self.set_cluster_resource(cluster_id, 'networking',
+ networking_req)
+ if code != 200:
+ print ("Failed to update Networking config for cluster id=%s!\n"
+ "Error message: %s" % (cluster_id, resp['message']))
+ sys.exit(1)
+
+ print "Update Partition config......."
+ code, resp = self.set_cluster_resource(cluster_id, 'partition',
+ partition_req)
+ if code != 200:
+ print ("Failed to update Partition config for cluster id=%s!\n"
+ "Error message: " % (cluster_id, resp['message']))
+ sys.exit(1)
+
+ deploy_list = []
+ deploy_hosts_data = []
+
+ machines_list = []
+ new_hosts_data = []
+ for record in hosts_data:
+ if record['state'] and int(record['deploy_action']):
+ deploy_list.append(record['id'])
+ deploy_hosts_data.append(record)
+
+ elif int(record['deploy_action']):
+ machines_list.append(record['machine_id'])
+ new_hosts_data.append(record)
+
+ if machines_list:
+ # add new hosts to the cluster
+ code, resp = self.add_hosts(cluster_id, machines_list)
+ if code != 200:
+ print ("Failed to add hosts to the cluster id=%s!\n"
+ "Error message: %s.\nfailed hosts are %s"
+ % (cluster_id, resp['message'], resp['failedMachines']))
+ sys.exit(1)
+
+ for record, host in zip(new_hosts_data, resp['cluster_hosts']):
+ record['id'] = host['id']
+ deploy_list.append(host['id'])
+ deploy_hosts_data.append(record)
+
+ # Update the config of each host in the cluster
+ for host in deploy_hosts_data:
+ req = {}
+ host_id = host['id']
+ print "Updating the config of host id=%s" % host['id']
+ req['hostname'] = host['hostname']
+ req.update(host['config_data'])
+ code, resp = self.update_host_config(int(host_id), raw_data=req)
+ if code != 200:
+ print ("Failed to update the config of the host id=%s!\n"
+ "Error message: %s" % (host_id, resp['message']))
+ sys.exit(1)
+
+ # Start to deploy the cluster
+ print "Start to deploy the cluster!....."
+ deploy_req = {"deploy": deploy_list}
+ code, resp = self.deploy_hosts(cluster_id, raw_data=deploy_req)
+ print "---Cluster Info---"
+ print "cluster_id url"
+ print (" %s %s"
+ % (resp['deployment']['cluster']['cluster_id'],
+ resp['deployment']['cluster']['url']))
+ print "---Hosts Info-----"
+ print "host_id url"
+ for host in resp['deployment']['hosts']:
+ print " %s %s" % (host['host_id'], host['url'])
+ print "---------------------------------------------------------------"
+ print "\n"
+ resp_results.put(resp)
+
+
+if __name__ == "__main__":
+ usage = "usage: %prog [options]"
+ parser = OptionParser(usage)
+
+ parser.add_option("-d", "--csv-dir", dest="csv_dir",
+ help="The directory of CSV files used for depolyment")
+ parser.add_option("-u", "--compass-url", dest="compass_url",
+ help="The URL of Compass server")
+ (options, args) = parser.parse_args()
+
+    if not os.path.exists(options.csv_dir):
+        print "Cannot find the directory: %s" % options.csv_dir
+        sys.exit(1)
+
+ start(options.csv_dir, options.compass_url)
diff --git a/compass-deck/bin/delete_clusters.py b/compass-deck/bin/delete_clusters.py
new file mode 100755
index 0000000..fddec17
--- /dev/null
+++ b/compass-deck/bin/delete_clusters.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Scripts to delete cluster and it hosts"""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+
+from compass.db.api import cluster as cluster_api
+from compass.db.api import database
+from compass.db.api import host as host_api
+from compass.db.api import user as user_api
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+flags.add('clusternames',
+          help='comma separated cluster names',
+ default='')
+flags.add_bool('delete_hosts',
+               help='whether to also delete all hosts related to the cluster',
+ default=False)
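+
+# Illustrative usage (cluster names are examples; the --delete_hosts
+# spelling is assumed from flags.add_bool): delete two clusters and
+# their underlying hosts:
+#   ./delete_clusters.py --clusternames=cluster1,cluster2 --delete_hosts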
+
+
+def delete_clusters():
+ clusternames = [
+ clustername
+ for clustername in flags.OPTIONS.clusternames.split(',')
+ if clustername
+ ]
+ user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
+ list_cluster_args = {}
+ if clusternames:
+ list_cluster_args['name'] = clusternames
+ clusters = cluster_api.list_clusters(
+ user=user, **list_cluster_args
+ )
+ delete_underlying_host = flags.OPTIONS.delete_hosts
+ for cluster in clusters:
+ cluster_id = cluster['id']
+ cluster_api.del_cluster(
+ cluster_id, True, False, delete_underlying_host, user=user
+ )
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ database.init()
+ delete_clusters()
diff --git a/compass-deck/bin/manage_db.py b/compass-deck/bin/manage_db.py
new file mode 100755
index 0000000..3e56433
--- /dev/null
+++ b/compass-deck/bin/manage_db.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""utility binary to manage database."""
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from flask.ext.script import Manager
+
+from compass.api import app
+from compass.db.api import database
+from compass.db.api import switch as switch_api
+from compass.db.api import user as user_api
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+flags.add('table_name',
+ help='table name',
+ default='')
+flags.add('clusters',
+ help=(
+              'clusters and hosts of each cluster, the format is '
+ 'clusterid:hostname1,hostname2,...;...'),
+ default='')
+flags.add_bool('async',
+               help='run in async mode',
+ default=True)
+flags.add('switch_machines_file',
+ help=(
+ 'files for switches and machines '
+ 'connected to each switch. each line in the file '
+ 'is machine,<switch ip>,<switch port>,<vlan>,<mac> '
+ 'or switch,<switch_ip>,<switch_vendor>,'
+ '<switch_version>,<switch_community>,<switch_state>'),
+ default='')
+flags.add('search_cluster_properties',
+ help='comma separated properties to search in cluster config',
+ default='')
+flags.add('print_cluster_properties',
+ help='comma separated cluster config properties to print',
+ default='')
+flags.add('search_host_properties',
+ help='comma separated properties to search in host config',
+ default='')
+flags.add('print_host_properties',
+ help='comma separated host config properties to print',
+ default='')
+
+
+app_manager = Manager(app, usage="Perform database operations")
+
+
+TABLE_MAPPING = {
+}
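+
+# Illustrative invocations through the Flask-Script manager (file paths
+# are examples only):
+#   ./manage_db.py checkdb
+#   ./manage_db.py createdb
+#   ./manage_db.py set_switch_machines --switch_machines_file=switches.csv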
+
+
+@app_manager.command
+def list_config():
+ "List the commands."
+ for key, value in app.config.items():
+ print key, value
+
+
+@app_manager.command
+def checkdb():
+ """check if db exists."""
+ if setting.DATABASE_TYPE == 'file':
+ if os.path.exists(setting.DATABASE_FILE):
+ sys.exit(0)
+ else:
+ sys.exit(1)
+
+ sys.exit(0)
+
+
+@app_manager.command
+def createdb():
+ """Creates database from sqlalchemy models."""
+ database.init()
+ try:
+ database.drop_db()
+ except Exception:
+ pass
+
+ if setting.DATABASE_TYPE == 'file':
+ if os.path.exists(setting.DATABASE_FILE):
+ os.remove(setting.DATABASE_FILE)
+ database.create_db()
+ if setting.DATABASE_TYPE == 'file':
+ os.chmod(setting.DATABASE_FILE, 0o777)
+
+
+@app_manager.command
+def dropdb():
+ """Drops database from sqlalchemy models."""
+ database.init()
+ database.drop_db()
+
+
+@app_manager.command
+def set_switch_machines():
+ """Set switches and machines.
+
+ .. note::
+ --switch_machines_file is the filename which stores all switches
+ and machines information.
+        each line in the file represents one machine or one switch.
+        the format of each line is machine,<switch_ip>,<switch_port>,<vlan>,<mac>
+ or switch,<switch_ip>,<switch_vendor>,<switch_version>,
+ <switch_community>,<switch_state>
+ """
+ if not flags.OPTIONS.switch_machines_file:
+ print 'flag --switch_machines_file is missing'
+ return
+ database.init()
+ switches, switch_machines = util.get_switch_machines_from_file(
+ flags.OPTIONS.switch_machines_file)
+ user = user_api.get_user_object(
+ setting.COMPASS_ADMIN_EMAIL
+ )
+ switch_mapping = {}
+ for switch in switches:
+ added_switch = switch_api.add_switch(
+ False, user=user, **switch
+ )
+ switch_mapping[switch['ip']] = added_switch['id']
+ for switch_ip, machines in switch_machines.items():
+ if switch_ip not in switch_mapping:
+ print 'switch ip %s not found' % switch_ip
+ sys.exit(1)
+ switch_id = switch_mapping[switch_ip]
+ for machine in machines:
+ switch_api.add_switch_machine(
+ switch_id, False, user=user, **machine
+ )
+
+
+if __name__ == "__main__":
+ flags.init()
+ logsetting.init()
+ app_manager.run()
diff --git a/compass-deck/bin/poll_switch.py b/compass-deck/bin/poll_switch.py
new file mode 100755
index 0000000..c61e1dd
--- /dev/null
+++ b/compass-deck/bin/poll_switch.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""main script to poll machines which is connected to the switches."""
+import functools
+import logging
+import os
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+import lockfile
+from multiprocessing import Pool
+
+from compass.actions import poll_switch
+from compass.actions import util
+from compass.db.api import database
+from compass.db.api import switch as switch_api
+from compass.db.api import user as user_api
+from compass.tasks.client import celery
+from compass.utils import daemonize
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+flags.add('switch_ips',
+          help='comma separated switch ips',
+ default='')
+flags.add_bool('async',
+               help='run in async mode',
+ default=True)
+flags.add('thread_pool_size', type='int',
+          help='thread pool size when running in non-async mode',
+ default=4)
+flags.add('run_interval', type='int',
+ help='run interval in seconds',
+ default=setting.POLLSWITCH_INTERVAL)
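+
+# Illustrative invocation (ips are examples; the --noasync spelling is
+# assumed from flags.add_bool): poll two switches in-process with a
+# larger worker pool:
+#   ./poll_switch.py --switch_ips=10.0.0.1,10.0.0.2 --noasync \
+#       --thread_pool_size=8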
+
+
+def pollswitches(switch_ips):
+ """poll switch."""
+ user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
+    poll_switches = {}
+ all_switches = dict([
+ (switch['ip'], switch['credentials'])
+ for switch in switch_api.list_switches(user=user)
+ ])
+ if switch_ips:
+ poll_switches = dict([
+ (switch_ip, all_switches[switch_ip])
+ for switch_ip in switch_ips
+ if switch_ip in all_switches
+ ])
+ else:
+ poll_switches = all_switches
+
+ if flags.OPTIONS.async:
+ for switch_ip, switch_credentials in poll_switches.items():
+ celery.send_task(
+ 'compass.tasks.pollswitch',
+ (user.email, switch_ip, switch_credentials)
+ )
+
+ else:
+ try:
+ pool = Pool(processes=flags.OPTIONS.thread_pool_size)
+ for switch_ip, switch_credentials in poll_switches.items():
+ pool.apply_async(
+ poll_switch.poll_switch,
+ (user.email, switch_ip, switch_credentials)
+ )
+ pool.close()
+ pool.join()
+ except Exception as error:
+ logging.error('failed to poll switches %s',
+ poll_switches)
+ logging.exception(error)
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ database.init()
+ logging.info('run poll_switch')
+ daemonize.daemonize(
+ functools.partial(
+ pollswitches,
+ [switch_ip
+ for switch_ip in flags.OPTIONS.switch_ips.split(',')
+ if switch_ip]),
+ flags.OPTIONS.run_interval,
+ pidfile=lockfile.FileLock('/var/run/poll_switch.pid'),
+ stderr=open('/tmp/poll_switch_err.log', 'w+'),
+ stdout=open('/tmp/poll_switch_out.log', 'w+'))
diff --git a/compass-deck/bin/progress_update.py b/compass-deck/bin/progress_update.py
new file mode 100755
index 0000000..cc8c12b
--- /dev/null
+++ b/compass-deck/bin/progress_update.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""main script to run as service to update hosts installing progress."""
+import functools
+import logging
+import os
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+import lockfile
+
+from compass.actions import update_progress
+from compass.db.api import database
+from compass.tasks.client import celery
+from compass.utils import daemonize
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+flags.add_bool('async',
+ help='run in async mode',
+ default=True)
+flags.add('run_interval', type='int',
+ help='run interval in seconds',
+ default=setting.PROGRESS_UPDATE_INTERVAL)
+
+
+def progress_update():
+ """entry function."""
+ if flags.OPTIONS.async:
+ celery.send_task('compass.tasks.update_progress', ())
+ else:
+ try:
+ update_progress.update_progress()
+ except Exception as error:
+ logging.error('failed to update progress')
+ logging.exception(error)
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ database.init()
+ logging.info('run progress update')
+ daemonize.daemonize(
+ progress_update,
+ flags.OPTIONS.run_interval,
+ pidfile=lockfile.FileLock('/var/run/progress_update.pid'),
+ stderr=open('/tmp/progress_update_err.log', 'w+'),
+ stdout=open('/tmp/progress_update_out.log', 'w+'))
diff --git a/compass-deck/bin/query_switch.py b/compass-deck/bin/query_switch.py
new file mode 100755
index 0000000..4b4b2cd
--- /dev/null
+++ b/compass-deck/bin/query_switch.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""query switch."""
+import optparse
+import Queue
+import threading
+import time
+
+from compass.apiclient.restful import Client
+
+
+class AddSwitch(object):
+ """A utility class.
+
+ Handles adding a switch and retrieving corresponding machines
+ associated with the switch.
+ """
+
+ def __init__(self, server_url):
+ print server_url, " ...."
+ self._client = Client(server_url)
+
+ def add_switch(self, queue, ip, snmp_community):
+ """Add a switch with SNMP credentials.
+
+ :param queue: The result holder for the machine details.
+ :type queue: A Queue object(thread-safe).
+ :param ip: The IP address of the switch.
+ :type ip: string.
+ :param snmp_community: The SNMP community string.
+ :type snmp_community: string.
+ """
+ status, resp = self._client.add_switch(ip,
+ version="2c",
+ community=snmp_community)
+ if status > 409:
+ queue.put((ip, (False,
+ "Failed to add the switch (status=%d)" % status)))
+ return
+
+ if status == 409:
+ # This is the case where the switch with the same IP already
+ # exists in the system. We now try to update the switch
+ # with the given credential.
+ switch_id = resp['failedSwitch']
+ status, resp = self._client.update_switch(switch_id,
+ version="2c",
+ community=snmp_community)
+ if status > 202:
+ queue.put((ip, (False,
+ "Failed to update the switch (status=%d)" %
+ status)))
+ return
+
+ switch = resp['switch']
+ state = switch['state']
+ switch_id = switch['id']
+
+ # if the switch state is not in under_monitoring,
+ # wait for the poll switch task
+ while True:
+ status, resp = self._client.get_switch(switch_id)
+ if status > 400:
+ queue.put((ip, (False, "Failed to get switch status")))
+ return
+
+ switch = resp['switch']
+
+ state = switch['state']
+ if state == 'initialized' or state == 'repolling':
+ time.sleep(5)
+ else:
+ break
+
+ if state == 'under_monitoring':
+ # get machines connected to the switch.
+ status, response = self._client.get_machines(switch_id=switch_id)
+ if status == 200:
+ for machine in response['machines']:
+ queue.put((ip, "mac=%s, vlan=%s, port=%s dbid=%d" % (
+ machine['mac'],
+ machine['vlan'],
+ machine['port'],
+ machine['id'])))
+ else:
+ queue.put((ip, (False,
+ "Failed to get machines %s" %
+ response['status'])))
+ else:
+ queue.put((ip, (False, "Switch state is %s" % state)))
+
+if __name__ == "__main__":
+ usage = "usage: %prog [options] switch_ips"
+ parser = optparse.OptionParser(usage)
+
+ parser.add_option("-u", "--server-url", dest="server_url",
+ default="http://localhost/api",
+ help="The Compass Server URL")
+
+ parser.add_option("-c", "--community", dest="community",
+ default="public",
+ help="Switch SNMP community string")
+
+ (options, args) = parser.parse_args()
+
+ if len(args) != 1:
+ parser.error("Wrong number of arguments")
+
+ threads = []
+ queue = Queue.Queue()
+ add_switch = AddSwitch(options.server_url)
+
+ print "Add switch to the server. This may take a while ..."
+ for switch in args[0].split(','):
+ t = threading.Thread(target=add_switch.add_switch,
+ args=(queue, switch, options.community))
+
+ threads.append(t)
+ t.start()
+
+ for t in threads:
+ t.join(60)
+
+ while True:
+ try:
+ ip, result = queue.get(block=False)
+ print ip, " : ", result
+ except Queue.Empty:
+ break
diff --git a/compass-deck/bin/refresh.sh b/compass-deck/bin/refresh.sh
new file mode 100755
index 0000000..d867440
--- /dev/null
+++ b/compass-deck/bin/refresh.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+/opt/compass/bin/refresh_agent.sh
+/opt/compass/bin/refresh_server.sh
diff --git a/compass-deck/bin/refresh_agent.sh b/compass-deck/bin/refresh_agent.sh
new file mode 100755
index 0000000..13c3050
--- /dev/null
+++ b/compass-deck/bin/refresh_agent.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+# systemctl restart mysql.service
+# systemctl status mysql.service || exit $?
+# /opt/compass/bin/manage_db.py createdb
+/opt/compass/bin/clean_installers.py --noasync
+/opt/compass/bin/clean_installation_logs.py
+rm -rf /var/ansible/run/*
+# systemctl restart httpd.service
+# systemctl status httpd.service || exit $?
+systemctl restart rsyslog.service
+systemctl status rsyslog.service || exit $?
+systemctl restart redis.service
+systemctl status redis.service || exit $?
+redis-cli flushall
+systemctl restart cobblerd.service
+systemctl status cobblerd.service || exit $?
+systemctl restart compass-celeryd.service
+systemctl status compass-celeryd.service || exit $?
+# systemctl restart compass-progress-updated.service
+# systemctl status compass-progress-updated.service || exit $?
+
diff --git a/compass-deck/bin/refresh_server.sh b/compass-deck/bin/refresh_server.sh
new file mode 100755
index 0000000..a93204a
--- /dev/null
+++ b/compass-deck/bin/refresh_server.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+systemctl restart mysql.service
+systemctl status mysql.service || exit $?
+/opt/compass/bin/manage_db.py createdb
+# /opt/compass/bin/clean_installers.py --noasync
+# /opt/compass/bin/clean_installation_logs.py
+# rm -rf /var/ansible/run/*
+systemctl restart httpd.service
+systemctl status httpd.service || exit $?
+systemctl restart rsyslog.service
+systemctl status rsyslog.service || exit $?
+systemctl restart redis.service
+systemctl status redis.service || exit $?
+redis-cli flushall
+# systemctl restart cobblerd.service
+# systemctl status cobblerd.service || exit $?
+# systemctl restart compass-celeryd.service
+# systemctl status compass-celeryd.service || exit $?
+# systemctl restart compass-progress-updated.service
+# systemctl status compass-progress-updated.service || exit $?
+
diff --git a/compass-deck/bin/runserver.py b/compass-deck/bin/runserver.py
new file mode 100755
index 0000000..b8b1a72
--- /dev/null
+++ b/compass-deck/bin/runserver.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""main script to start an instance of compass server ."""
+import logging
+
+from compass.api import app
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('server_host',
+ help='server host address',
+ default='0.0.0.0')
+flags.add_bool('debug',
+ help='run in debug mode',
+ default=True)
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ logging.info('run server')
+ app.run(host=flags.OPTIONS.server_host, debug=flags.OPTIONS.debug)
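+
+# Illustrative invocation (flag spelling assumed from the flags
+# definitions above):
+#   ./runserver.py --server_host=127.0.0.1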
diff --git a/compass-deck/bin/switch_virtualenv.py b/compass-deck/bin/switch_virtualenv.py
new file mode 100755
index 0000000..ca843eb
--- /dev/null
+++ b/compass-deck/bin/switch_virtualenv.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""utility switch to virtual env."""
+import os
+import os.path
+import site
+import sys
+
+
+virtual_env = '/root/.virtualenvs/compass-core'
+activate_this = '%s/bin/activate_this.py' % virtual_env
+execfile(activate_this, dict(__file__=activate_this))
+site.addsitedir('%s/lib/python2.6/site-packages' % virtual_env)
+if virtual_env not in sys.path:
+ sys.path.append(virtual_env)
+os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
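+
+# Typical use (illustrative): sibling scripts append this directory to
+# sys.path and then `import switch_virtualenv` purely for its side
+# effect of activating the virtualenv, as bin/manage_db.py above does.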
diff --git a/compass-deck/build.sh b/compass-deck/build.sh
new file mode 100755
index 0000000..81ed6ff
--- /dev/null
+++ b/compass-deck/build.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2016-2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -x
+COMPASS_DIR=${BASH_SOURCE[0]%/*}
+
+yum update -y
+
+yum --nogpgcheck install -y which python python-devel git wget syslinux amqp mod_wsgi httpd bind rsync yum-utils gcc unzip openssl openssl098e ca-certificates mysql-devel mysql MySQL-python python-virtualenv python-setuptools python-pip bc libselinux-python libffi-devel openssl-devel vim net-tools
+
+git clone git://git.openstack.org/openstack/compass-web $COMPASS_DIR/../compass-web/
+rm -rf $COMPASS_DIR/../compass-web/.git
+
+easy_install --upgrade pip
+easy_install --upgrade six
+pip install --upgrade pip
+pip install --upgrade setuptools
+pip install --upgrade virtualenv
+pip install --upgrade redis
+pip install --upgrade virtualenvwrapper
+
+source `which virtualenvwrapper.sh`
+mkvirtualenv --system-site-packages compass-core
+workon compass-core
+cd /root/compass-deck
+pip install -U -r requirements.txt
+cd -
+
+systemctl enable httpd
+mkdir -p /var/log/httpd
+chmod -R 777 /var/log/httpd
+mkdir -p /var/www/compass_web/v2.5
+cp -rf $COMPASS_DIR/../compass-web/v2.5/target/* /var/www/compass_web/v2.5/
+
+echo "ServerName compass-deck:80" >> /etc/httpd/conf/httpd.conf
+mkdir -p /opt/compass/bin
+mkdir -p /opt/compass/db
+cp -f $COMPASS_DIR/misc/apache/{ods-server.conf,http_pip.conf,images.conf,packages.conf} \
+/etc/httpd/conf.d/
+cp -rf $COMPASS_DIR/bin/* /opt/compass/bin/
+mkdir -p /var/www/compass
+ln -s -f /opt/compass/bin/compass_wsgi.py /var/www/compass/compass.wsgi
+cp -rf /usr/lib64/libcrypto.so.6 /usr/lib64/libcrypto.so
+
+mkdir -p /var/log/compass
+chmod -R 777 /var/log/compass
+chmod -R 777 /opt/compass/db
+mkdir -p $COMPASS_DIR/compass
+mv $COMPASS_DIR/{actions,api,apiclient,utils,db,tasks,deployment} $COMPASS_DIR/compass/
+touch $COMPASS_DIR/compass/__init__.py
+source `which virtualenvwrapper.sh`
+workon compass-core
+cd /root/compass-deck
+python setup.py install
+usermod -a -G root apache
+
+yum clean all
+
+cp $COMPASS_DIR/start.sh /usr/local/bin/start.sh
+set +x
diff --git a/compass-deck/db/__init__.py b/compass-deck/db/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-deck/db/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/db/api/__init__.py b/compass-deck/db/api/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-deck/db/api/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/db/api/adapter.py b/compass-deck/db/api/adapter.py
new file mode 100644
index 0000000..c3ad48d
--- /dev/null
+++ b/compass-deck/db/api/adapter.py
@@ -0,0 +1,313 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Adapter related database operations."""
+import logging
+import re
+
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+OSES = None
+OS_INSTALLERS = None
+PACKAGE_INSTALLERS = None
+ADAPTERS = None
+ADAPTERS_FLAVORS = None
+ADAPTERS_ROLES = None
+
+
+def _get_oses_from_configuration():
+ """Get all os configs from os configuration dir.
+
+ Example: {
+ <os_name>: {
+ 'name': <os_name>,
+ 'id': <os_name>,
+ 'os_id': <os_name>,
+ 'deployable': True
+ }
+ }
+ """
+ configs = util.load_configs(setting.OS_DIR)
+ systems = {}
+ for config in configs:
+ logging.info('get config %s', config)
+ system_name = config['NAME']
+ parent_name = config.get('PARENT', None)
+ system = {
+ 'name': system_name,
+ 'id': system_name,
+ 'os_id': system_name,
+ 'parent': parent_name,
+ 'parent_id': parent_name,
+ 'deployable': config.get('DEPLOYABLE', False)
+ }
+ systems[system_name] = system
+ parents = {}
+ for name, system in systems.items():
+ parent = system.get('parent', None)
+ parents[name] = parent
+ for name, system in systems.items():
+ util.recursive_merge_dict(name, systems, parents)
+ return systems
+
+
+def _get_installers_from_configuration(configs):
+ """Get installers from configurations.
+
+ Example: {
+ <installer_isntance>: {
+ 'alias': <instance_name>,
+ 'id': <instance_name>,
+ 'name': <name>,
+ 'settings': <dict pass to installer plugin>
+ }
+ }
+ """
+ installers = {}
+ for config in configs:
+ name = config['NAME']
+ instance_name = config.get('INSTANCE_NAME', name)
+ installers[instance_name] = {
+ 'alias': instance_name,
+ 'id': instance_name,
+ 'name': name,
+ 'settings': config.get('SETTINGS', {})
+ }
+ return installers
+
+
+def _get_os_installers_from_configuration():
+ """Get os installers from os installer config dir."""
+ configs = util.load_configs(setting.OS_INSTALLER_DIR)
+ return _get_installers_from_configuration(configs)
+
+
+def _get_package_installers_from_configuration():
+ """Get package installers from package installer config dir."""
+ configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
+ return _get_installers_from_configuration(configs)
+
+
+def _get_adapters_from_configuration():
+ """Get adapters from adapter config dir."""
+ configs = util.load_configs(setting.ADAPTER_DIR)
+ adapters = {}
+ for config in configs:
+ logging.info('add config %s to adapter', config)
+ if 'OS_INSTALLER' in config:
+ os_installer = OS_INSTALLERS[config['OS_INSTALLER']]
+ else:
+ os_installer = None
+
+ if 'PACKAGE_INSTALLER' in config:
+ package_installer = PACKAGE_INSTALLERS[
+ config['PACKAGE_INSTALLER']
+ ]
+ else:
+ package_installer = None
+
+ adapter_name = config['NAME']
+ parent_name = config.get('PARENT', None)
+ adapter = {
+ 'name': adapter_name,
+ 'id': adapter_name,
+ 'parent': parent_name,
+ 'parent_id': parent_name,
+ 'display_name': config.get('DISPLAY_NAME', adapter_name),
+ 'os_installer': os_installer,
+ 'package_installer': package_installer,
+ 'deployable': config.get('DEPLOYABLE', False),
+ 'health_check_cmd': config.get('HEALTH_CHECK_COMMAND', None),
+ 'supported_oses': [],
+ 'roles': [],
+ 'flavors': []
+ }
+ supported_os_patterns = [
+ re.compile(supported_os_pattern)
+ for supported_os_pattern in config.get('SUPPORTED_OS_PATTERNS', [])
+ ]
+ for os_name, os in OSES.items():
+ if not os.get('deployable', False):
+ continue
+ for supported_os_pattern in supported_os_patterns:
+ if supported_os_pattern.match(os_name):
+ adapter['supported_oses'].append(os)
+ break
+ adapters[adapter_name] = adapter
+
+ parents = {}
+ for name, adapter in adapters.items():
+ parent = adapter.get('parent', None)
+ parents[name] = parent
+ for name, adapter in adapters.items():
+ util.recursive_merge_dict(name, adapters, parents)
+ return adapters
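+
+# Illustrative adapter config consumed above (keys from the parsing
+# logic; values are examples only):
+#   NAME = 'openstack_icehouse'
+#   DISPLAY_NAME = 'OpenStack Icehouse'
+#   PARENT = 'openstack'
+#   OS_INSTALLER = 'cobbler'
+#   PACKAGE_INSTALLER = 'chef_installer'
+#   SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
+#   DEPLOYABLE = True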
+
+
+def _add_roles_from_configuration():
+ """Get roles from roles config dir and update to adapters."""
+ configs = util.load_configs(setting.ADAPTER_ROLE_DIR)
+ for config in configs:
+ logging.info(
+ 'add config %s to role', config
+ )
+ adapter_name = config['ADAPTER_NAME']
+ adapter = ADAPTERS[adapter_name]
+ adapter_roles = ADAPTERS_ROLES.setdefault(adapter_name, {})
+ for role_dict in config['ROLES']:
+ role_name = role_dict['role']
+ display_name = role_dict.get('display_name', role_name)
+ adapter_roles[role_name] = {
+ 'name': role_name,
+ 'id': '%s:%s' % (adapter_name, role_name),
+ 'adapter_id': adapter_name,
+ 'adapter_name': adapter_name,
+ 'display_name': display_name,
+ 'description': role_dict.get('description', display_name),
+ 'optional': role_dict.get('optional', False)
+ }
+ parents = {}
+ for name, adapter in ADAPTERS.items():
+ parent = adapter.get('parent', None)
+ parents[name] = parent
+ for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
+ util.recursive_merge_dict(adapter_name, ADAPTERS_ROLES, parents)
+ for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
+ adapter = ADAPTERS[adapter_name]
+ adapter['roles'] = adapter_roles.values()
+
+
+def _add_flavors_from_configuration():
+ """Get flavors from flavor config dir and update to adapters."""
+ configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR)
+ for config in configs:
+ logging.info('add config %s to flavor', config)
+ adapter_name = config['ADAPTER_NAME']
+ adapter = ADAPTERS[adapter_name]
+ adapter_flavors = ADAPTERS_FLAVORS.setdefault(adapter_name, {})
+ adapter_roles = ADAPTERS_ROLES[adapter_name]
+ for flavor_dict in config['FLAVORS']:
+ flavor_name = flavor_dict['flavor']
+ flavor_id = '%s:%s' % (adapter_name, flavor_name)
+ flavor = {
+ 'name': flavor_name,
+ 'id': flavor_id,
+ 'adapter_id': adapter_name,
+ 'adapter_name': adapter_name,
+ 'display_name': flavor_dict.get('display_name', flavor_name),
+ 'template': flavor_dict.get('template', None)
+ }
+ flavor_roles = flavor_dict.get('roles', [])
+ roles_in_flavor = []
+ for flavor_role in flavor_roles:
+ if isinstance(flavor_role, basestring):
+ role_name = flavor_role
+ role_in_flavor = {
+ 'name': role_name,
+ 'flavor_id': flavor_id
+ }
+ else:
+ role_in_flavor = flavor_role
+ role_in_flavor['flavor_id'] = flavor_id
+ if 'role' in role_in_flavor:
+ role_in_flavor['name'] = role_in_flavor['role']
+ del role_in_flavor['role']
+ role_name = role_in_flavor['name']
+ role = adapter_roles[role_name]
+ util.merge_dict(role_in_flavor, role, override=False)
+ roles_in_flavor.append(role_in_flavor)
+ flavor['roles'] = roles_in_flavor
+ adapter_flavors[flavor_name] = flavor
+ parents = {}
+ for name, adapter in ADAPTERS.items():
+ parent = adapter.get('parent', None)
+ parents[name] = parent
+ for adapter_name, adapter_roles in ADAPTERS_FLAVORS.items():
+ util.recursive_merge_dict(adapter_name, ADAPTERS_FLAVORS, parents)
+ for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
+ adapter = ADAPTERS[adapter_name]
+ adapter['flavors'] = adapter_flavors.values()
+
+
+def load_adapters_internal(force_reload=False):
+ """Load adapter related configurations into memory.
+
+    If force_reload is set, reload all configurations even if they are
+    already loaded.
+ """
+ global OSES
+ if force_reload or OSES is None:
+ OSES = _get_oses_from_configuration()
+ global OS_INSTALLERS
+ if force_reload or OS_INSTALLERS is None:
+ OS_INSTALLERS = _get_os_installers_from_configuration()
+ global PACKAGE_INSTALLERS
+ if force_reload or PACKAGE_INSTALLERS is None:
+ PACKAGE_INSTALLERS = _get_package_installers_from_configuration()
+ global ADAPTERS
+ if force_reload or ADAPTERS is None:
+ ADAPTERS = _get_adapters_from_configuration()
+ global ADAPTERS_ROLES
+ if force_reload or ADAPTERS_ROLES is None:
+ ADAPTERS_ROLES = {}
+ _add_roles_from_configuration()
+ global ADAPTERS_FLAVORS
+ if force_reload or ADAPTERS_FLAVORS is None:
+ ADAPTERS_FLAVORS = {}
+ _add_flavors_from_configuration()
+
+
+def get_adapters_internal(force_reload=False):
+ """Get all deployable adapters."""
+ load_adapters_internal(force_reload=force_reload)
+ adapter_mapping = {}
+ for adapter_name, adapter in ADAPTERS.items():
+ if adapter.get('deployable'):
+ # TODO(xicheng): adapter should be filtered before
+ # return to caller.
+ adapter_mapping[adapter_name] = adapter
+ else:
+ logging.info(
+ 'ignore adapter %s since it is not deployable',
+ adapter_name
+ )
+ return adapter_mapping
+
+
+def get_flavors_internal(force_reload=False):
+ """Get all deployable flavors."""
+ load_adapters_internal(force_reload=force_reload)
+ adapter_flavor_mapping = {}
+ for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
+ adapter = ADAPTERS.get(adapter_name, {})
+ for flavor_name, flavor in adapter_flavors.items():
+ if adapter.get('deployable'):
+ # TODO(xicheng): flavor dict should be filtered before
+ # return to caller.
+ adapter_flavor_mapping.setdefault(
+ adapter_name, {}
+ )[flavor_name] = flavor
+ else:
+ logging.info(
+ 'ignore adapter %s since it is not deployable',
+ adapter_name
+ )
+
+ return adapter_flavor_mapping
diff --git a/compass-deck/db/api/adapter_holder.py b/compass-deck/db/api/adapter_holder.py
new file mode 100644
index 0000000..91c65c4
--- /dev/null
+++ b/compass-deck/db/api/adapter_holder.py
@@ -0,0 +1,155 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Adapter related object holder."""
+import logging
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+
+
+SUPPORTED_FIELDS = [
+ 'name',
+]
+RESP_FIELDS = [
+ 'id', 'name', 'roles', 'flavors',
+ 'os_installer', 'package_installer',
+ 'supported_oses', 'display_name', 'health_check_cmd'
+]
+RESP_OS_FIELDS = [
+ 'id', 'name', 'os_id'
+]
+RESP_ROLES_FIELDS = [
+ 'id', 'name', 'display_name', 'description', 'optional'
+]
+RESP_FLAVORS_FIELDS = [
+ 'id', 'adapter_id', 'adapter_name', 'name', 'display_name',
+ 'template', 'roles'
+]
+
+
+ADAPTER_MAPPING = None
+FLAVOR_MAPPING = None
+
+
+def load_adapters(force_reload=False):
+ global ADAPTER_MAPPING
+ if force_reload or ADAPTER_MAPPING is None:
+ logging.info('load adapters into memory')
+ ADAPTER_MAPPING = adapter_api.get_adapters_internal(
+ force_reload=force_reload
+ )
+
+
+def load_flavors(force_reload=False):
+ global FLAVOR_MAPPING
+ if force_reload or FLAVOR_MAPPING is None:
+ logging.info('load flavors into memory')
+ FLAVOR_MAPPING = {}
+ adapters_flavors = adapter_api.get_flavors_internal(
+ force_reload=force_reload
+ )
+ for adapter_name, adapter_flavors in adapters_flavors.items():
+ for flavor_name, flavor in adapter_flavors.items():
+ FLAVOR_MAPPING['%s:%s' % (adapter_name, flavor_name)] = flavor
+
+
+def _filter_adapters(adapter_config, filter_name, filter_value):
+ if filter_name not in adapter_config:
+ return False
+ if isinstance(filter_value, list):
+ return bool(
+ adapter_config[filter_name] in filter_value
+ )
+ elif isinstance(filter_value, dict):
+ return all([
+ _filter_adapters(
+ adapter_config[filter_name],
+ sub_filter_key, sub_filter_value
+ )
+ for sub_filter_key, sub_filter_value in filter_value.items()
+ ])
+ else:
+ return adapter_config[filter_name] == filter_value
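+
+# Illustrative _filter_adapters behavior (values are examples only):
+#   _filter_adapters({'name': 'ceph'}, 'name', ['ceph', 'chef'])
+#   # -> True: list filters test membership.
+#   _filter_adapters({'os_installer': {'name': 'cobbler'}},
+#                    'os_installer', {'name': 'cobbler'})
+#   # -> True: dict filters recurse into the nested config.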
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_ADAPTERS
+)
+@utils.output_filters(name=utils.general_filter_callback)
+@utils.wrap_to_dict(
+ RESP_FIELDS,
+ supported_oses=RESP_OS_FIELDS,
+ roles=RESP_ROLES_FIELDS,
+ flavors=RESP_FLAVORS_FIELDS
+)
+def list_adapters(user=None, session=None, **filters):
+ """list adapters."""
+ load_adapters()
+ return ADAPTER_MAPPING.values()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_ADAPTERS
+)
+@utils.wrap_to_dict(
+ RESP_FIELDS,
+ supported_oses=RESP_OS_FIELDS,
+ roles=RESP_ROLES_FIELDS,
+ flavors=RESP_FLAVORS_FIELDS
+)
+def get_adapter(adapter_id, user=None, session=None, **kwargs):
+ """get adapter."""
+ load_adapters()
+ if adapter_id not in ADAPTER_MAPPING:
+ raise exception.RecordNotExists(
+            'adapter %s does not exist' % adapter_id
+ )
+ return ADAPTER_MAPPING[adapter_id]
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
+def list_flavors(user=None, session=None, **filters):
+ """List flavors."""
+ load_flavors()
+ return FLAVOR_MAPPING.values()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
+def get_flavor(flavor_id, user=None, session=None, **kwargs):
+ """Get flavor."""
+ load_flavors()
+ if flavor_id not in FLAVOR_MAPPING:
+ raise exception.RecordNotExists(
+ 'flavor %s does not exist' % flavor_id
+ )
+ return FLAVOR_MAPPING[flavor_id]
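+
+# Illustrative lookup: flavors are keyed '<adapter_name>:<flavor_name>'
+# (see load_flavors above), so a call might look like (names are
+# examples only):
+#   get_flavor('openstack_icehouse:allinone', user=admin_user)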
diff --git a/compass-deck/db/api/cluster.py b/compass-deck/db/api/cluster.py
new file mode 100644
index 0000000..7a7022c
--- /dev/null
+++ b/compass-deck/db/api/cluster.py
@@ -0,0 +1,2444 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cluster database operations."""
+import copy
+import functools
+import logging
+import re
+
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = [
+ 'name', 'os_name', 'owner',
+ 'adapter_name', 'flavor_name'
+]
+SUPPORTED_CLUSTERHOST_FIELDS = []
+RESP_FIELDS = [
+ 'id', 'name', 'os_name', 'os_id', 'adapter_id', 'flavor_id',
+ 'reinstall_distributed_system', 'flavor',
+ 'distributed_system_installed',
+ 'owner', 'adapter_name', 'flavor_name',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTERHOST_FIELDS = [
+ 'id', 'host_id', 'clusterhost_id', 'machine_id',
+ 'name', 'hostname', 'roles', 'os_installer',
+ 'cluster_id', 'clustername', 'location', 'tag',
+ 'networks', 'mac', 'switch_ip', 'port', 'switches',
+ 'os_installed', 'distributed_system_installed',
+ 'os_name', 'os_id', 'ip',
+ 'reinstall_os', 'reinstall_distributed_system',
+ 'owner', 'cluster_id',
+ 'created_at', 'updated_at',
+ 'patched_roles'
+]
+RESP_CONFIG_FIELDS = [
+ 'os_config',
+ 'package_config',
+ 'config_step',
+ 'config_validated',
+ 'created_at',
+ 'updated_at'
+]
+RESP_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config',
+ 'created_at',
+ 'updated_at'
+]
+RESP_METADATA_FIELDS = [
+ 'os_config', 'package_config'
+]
+RESP_CLUSTERHOST_CONFIG_FIELDS = [
+ 'package_config',
+ 'os_config',
+ 'config_step',
+ 'config_validated',
+ 'networks',
+ 'created_at',
+ 'updated_at'
+]
+RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config',
+ 'created_at',
+ 'updated_at'
+]
+RESP_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity',
+ 'status', 'ready',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTERHOST_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity',
+ 'ready', 'created_at', 'updated_at'
+]
+RESP_REVIEW_FIELDS = [
+ 'cluster', 'hosts'
+]
+RESP_DEPLOY_FIELDS = [
+ 'status', 'cluster', 'hosts'
+]
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+ADDED_FIELDS = ['name', 'adapter_id', 'os_id']
+OPTIONAL_ADDED_FIELDS = ['flavor_id']
+UPDATED_FIELDS = ['name', 'reinstall_distributed_system']
+ADDED_HOST_FIELDS = ['machine_id']
+UPDATED_HOST_FIELDS = ['name', 'reinstall_os']
+UPDATED_CLUSTERHOST_FIELDS = ['roles', 'patched_roles']
+PATCHED_CLUSTERHOST_FIELDS = ['patched_roles']
+UPDATED_CONFIG_FIELDS = [
+ 'put_os_config', 'put_package_config', 'config_step'
+]
+UPDATED_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config', 'deployed_package_config'
+]
+PATCHED_CONFIG_FIELDS = [
+ 'patched_os_config', 'patched_package_config', 'config_step'
+]
+UPDATED_CLUSTERHOST_CONFIG_FIELDS = [
+ 'put_os_config',
+ 'put_package_config'
+]
+PATCHED_CLUSTERHOST_CONFIG_FIELDS = [
+ 'patched_os_config',
+ 'patched_package_config'
+]
+UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config'
+]
+UPDATED_CLUSTERHOST_STATE_FIELDS = [
+ 'state', 'percentage', 'message', 'severity'
+]
+UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS = [
+ 'ready'
+]
+UPDATED_CLUSTER_STATE_FIELDS = ['state']
+IGNORE_UPDATED_CLUSTER_STATE_FIELDS = ['percentage', 'message', 'severity']
+UPDATED_CLUSTER_STATE_INTERNAL_FIELDS = ['ready']
+RESP_CLUSTERHOST_LOG_FIELDS = [
+ 'clusterhost_id', 'id', 'host_id', 'cluster_id',
+ 'filename', 'position', 'partial_line',
+ 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+ADDED_CLUSTERHOST_LOG_FIELDS = [
+ 'filename'
+]
+UPDATED_CLUSTERHOST_LOG_FIELDS = [
+ 'position', 'partial_line', 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_clusters(user=None, session=None, **filters):
+ """List clusters."""
+ clusters = utils.list_db_objects(
+ session, models.Cluster, **filters
+ )
+ logging.info('user is %s', user.email)
+ if not user.is_admin and len(clusters):
+ clusters = [c for c in clusters if c.owner == user.email]
+ return clusters
+
+
+def _get_cluster(cluster_id, session=None, **kwargs):
+ """Get cluster by id."""
+ if isinstance(cluster_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Cluster, id=cluster_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'cluster id %s type is not int compatible' % cluster_id
+ )
+
+
+def get_cluster_internal(cluster_id, session=None, **kwargs):
+ """Helper function to get cluster.
+
+    Should only be used by other files under db/api.
+ """
+ return _get_cluster(cluster_id, session=session, **kwargs)
+
+
+def _get_cluster_host(
+ cluster_id, host_id, session=None, **kwargs
+):
+ """Get clusterhost by cluster id and host id."""
+ cluster = _get_cluster(cluster_id, session=session, **kwargs)
+ from compass.db.api import host as host_api
+ host = host_api.get_host_internal(host_id, session=session, **kwargs)
+ return utils.get_db_object(
+ session, models.ClusterHost,
+ cluster_id=cluster.id,
+ host_id=host.id,
+ **kwargs
+ )
+
+
+def _get_clusterhost(clusterhost_id, session=None, **kwargs):
+ """Get clusterhost by clusterhost id."""
+ if isinstance(clusterhost_id, (int, long)):
+ return utils.get_db_object(
+ session, models.ClusterHost,
+ clusterhost_id=clusterhost_id,
+ **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'clusterhost id %s type is not int compatible' % clusterhost_id
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_cluster(
+ cluster_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get cluster info."""
+ return _get_cluster(
+ cluster_id,
+ session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERS)
+def is_cluster_os_ready(
+ cluster_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ cluster = utils.get_db_object(
+ session, models.Cluster, exception_when_missing, id=cluster_id)
+
+    all_states = [i.host.state.ready for i in cluster.clusterhosts]
+
+    logging.info("is_cluster_os_ready: all_states %s", all_states)
+
+ return all(all_states)
+
+
+def check_cluster_validated(cluster):
+ """Check cluster is validated."""
+ if not cluster.config_validated:
+ raise exception.Forbidden(
+ 'cluster %s is not validated' % cluster.name
+ )
+
+
+def check_clusterhost_validated(clusterhost):
+ """Check clusterhost is validated."""
+ if not clusterhost.config_validated:
+ raise exception.Forbidden(
+ 'clusterhost %s is not validated' % clusterhost.name
+ )
+
+
+def check_cluster_editable(
+ cluster, user=None,
+ check_in_installing=False
+):
+ """Check if cluster is editable.
+
+    Set check_in_installing to True when setting the cluster
+    reinstall_distributed_system attribute, or for any other check
+    that requires the cluster not to be in the installing state.
+    Otherwise we only make sure the cluster is not deploying or
+    deployed.
+    If the user is neither an admin nor the owner of the cluster,
+    the check fails so the user cannot update the cluster attributes.
+ """
+ if check_in_installing:
+ if cluster.state.state == 'INSTALLING':
+ raise exception.Forbidden(
+ 'cluster %s is not editable '
+ 'when state is installing' % cluster.name
+ )
+# elif (
+# cluster.flavor_name and
+# not cluster.reinstall_distributed_system
+# ):
+# raise exception.Forbidden(
+# 'cluster %s is not editable '
+# 'when not to be reinstalled' % cluster.name
+# )
+ if user and not user.is_admin and cluster.creator_id != user.id:
+ raise exception.Forbidden(
+ 'cluster %s is not editable '
+ 'when user is not admin or cluster owner' % cluster.name
+ )
+
+
+def is_cluster_editable(
+ cluster, user=None,
+ check_in_installing=False
+):
+ """Get if cluster is editble."""
+ try:
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=check_in_installing
+ )
+ return True
+ except exception.Forbidden:
+ return False
+
+
+@utils.supported_filters(
+ ADDED_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_cluster(
+ exception_when_existing=True,
+ name=None, adapter_id=None, flavor_id=None,
+ user=None, session=None, **kwargs
+):
+ """Create a cluster."""
+ adapter = adapter_api.get_adapter(
+ adapter_id, user=user, session=session
+ )
+ # if flavor_id is not None, also set flavor field.
+ # In future maybe we can move the use of flavor from
+    # models.py to db/api and explicitly get flavor when
+ # needed instead of setting flavor into cluster record.
+ flavor = {}
+ if flavor_id:
+ flavor = adapter_api.get_flavor(
+ flavor_id,
+ user=user, session=session
+ )
+ if flavor['adapter_id'] != adapter['id']:
+ raise exception.InvalidParameter(
+ 'flavor %s is not of adapter %s' % (
+ flavor_id, adapter_id
+ )
+ )
+
+ cluster = utils.add_db_object(
+ session, models.Cluster, exception_when_existing,
+ name, user.id, adapter_id=adapter_id,
+ flavor_id=flavor_id, flavor=flavor, **kwargs
+ )
+ return cluster
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_cluster(cluster_id, user=None, session=None, **kwargs):
+ """Update a cluster."""
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=(
+ kwargs.get('reinstall_distributed_system', False)
+ )
+ )
+ return utils.update_db_object(session, cluster, **kwargs)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_FIELDS + ['status', 'cluster', 'hosts'],
+ cluster=RESP_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def del_cluster(
+ cluster_id, force=False, from_database_only=False,
+ delete_underlying_host=False, user=None, session=None, **kwargs
+):
+ """Delete a cluster.
+
+ If force, the cluster will be deleted anyway. It is used by cli to
+ force clean a cluster in any case.
+    If from_database_only, the cluster record will only be removed from
+    the database. Otherwise, a del task is sent to celery to do clean deletion.
+ If delete_underlying_host, all hosts under this cluster will also be
+ deleted.
+ The backend will call del_cluster again with from_database_only set
+ when it has done the deletion work on os installer/package installer.
+ """
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ logging.debug(
+ 'delete cluster %s with force=%s '
+ 'from_database_only=%s delete_underlying_host=%s',
+ cluster.id, force, from_database_only, delete_underlying_host
+ )
+ # force set cluster state to ERROR and the state of any clusterhost
+ # in the cluster to ERROR when we want to delete the cluster anyway
+    # even if the cluster is installing or already installed.
+    # It lets the api know that deletion is in progress while the
+    # backend does the real deleting.
+    # In future we may introduce a new state like INDELETE to indicate
+    # that deletion is in progress.
+    # We still need to discuss whether the api should allow deleting a
+    # cluster that is already installed.
+ for clusterhost in cluster.clusterhosts:
+ if clusterhost.state.state != 'UNINITIALIZED' and force:
+ clusterhost.state.state = 'ERROR'
+ if delete_underlying_host:
+ host = clusterhost.host
+ if host.state.state != 'UNINITIALIZED' and force:
+ host.state.state = 'ERROR'
+ if cluster.state.state != 'UNINITIALIZED' and force:
+ cluster.state.state = 'ERROR'
+
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+
+ # delete underlying host if delete_underlying_host is set.
+ if delete_underlying_host:
+ for clusterhost in cluster.clusterhosts:
+ # delete underlying host only user has permission.
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ if host_api.is_host_editable(
+ host, user=user, check_in_installing=True
+ ):
+ # Delete host record directly in database when there is no need
+ # to do the deletion in backend or from_database_only is set.
+ if host.state.state == 'UNINITIALIZED' or from_database_only:
+ utils.del_db_object(
+ session, host
+ )
+
+ # Delete cluster record directly in database when there
+ # is no need to do the deletion in backend or from_database_only is set.
+ if cluster.state.state == 'UNINITIALIZED' or from_database_only:
+ return utils.del_db_object(
+ session, cluster
+ )
+ else:
+ from compass.tasks import client as celery_client
+ logging.info('send del cluster %s task to celery', cluster_id)
+ celery_client.celery.send_task(
+ 'compass.tasks.delete_cluster',
+ (
+ user.email, cluster.id,
+ [
+ clusterhost.host_id
+ for clusterhost in cluster.clusterhosts
+ ],
+ delete_underlying_host
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'delete action is sent',
+ 'cluster': cluster,
+ 'hosts': cluster.clusterhosts
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def get_cluster_config(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster config."""
+ return _get_cluster(cluster_id, session=session)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
+def get_cluster_deployed_config(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster deployed config."""
+ return _get_cluster(cluster_id, session=session)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_cluster_metadata(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster metadata.
+
+    If the cluster has no flavor, it is an os-only cluster.
+    We ignore package metadata for os-only clusters.
+ """
+ cluster = _get_cluster(cluster_id, session=session)
+ metadatas = {}
+ os_name = cluster.os_name
+ if os_name:
+ metadatas.update(
+ metadata_api.get_os_metadata(
+ os_name, session=session
+ )
+ )
+ flavor_id = cluster.flavor_id
+ if flavor_id:
+ metadatas.update(
+ metadata_api.get_flavor_metadata(
+ flavor_id,
+ user=user, session=session
+ )
+ )
+
+ return metadatas
+
+
+def _cluster_os_config_validates(
+ config, cluster, session=None, user=None, **kwargs
+):
+ """Check cluster os config validation."""
+ metadata_api.validate_os_config(
+ config, cluster.os_id
+ )
+
+
+def _cluster_package_config_validates(
+ config, cluster, session=None, user=None, **kwargs
+):
+ """Check cluster package config validation."""
+ metadata_api.validate_flavor_config(
+ config, cluster.flavor_id
+ )
+
+
+@utils.input_validates_with_args(
+ put_os_config=_cluster_os_config_validates,
+ put_package_config=_cluster_package_config_validates
+)
+@utils.output_validates_with_args(
+ os_config=_cluster_os_config_validates,
+ package_config=_cluster_package_config_validates
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def _update_cluster_config(cluster, session=None, user=None, **kwargs):
+ """Update a cluster config."""
+ check_cluster_editable(cluster, user=user)
+ return utils.update_db_object(
+ session, cluster, **kwargs
+ )
+
+
+# replace os_config to deployed_os_config,
+# package_config to deployed_package_config
+@utils.replace_filters(
+ os_config='deployed_os_config',
+ package_config='deployed_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_DEPLOYED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
+def update_cluster_deployed_config(
+ cluster_id, user=None, session=None, **kwargs
+):
+ """Update cluster deployed config."""
+ cluster = _get_cluster(cluster_id, session=session)
+ check_cluster_editable(cluster, user=user)
+ check_cluster_validated(cluster)
+ return utils.update_db_object(
+ session, cluster, **kwargs
+ )
+
+
+# replace os_config to put_os_config,
+# package_config to put_package_config in kwargs.
+# It tells db these fields will be updated not patched.
+@utils.replace_filters(
+ os_config='put_os_config',
+ package_config='put_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER_CONFIG
+)
+def update_cluster_config(cluster_id, user=None, session=None, **kwargs):
+ """Update cluster config."""
+ cluster = _get_cluster(cluster_id, session=session)
+ return _update_cluster_config(
+ cluster, session=session, user=user, **kwargs
+ )
+
+
+# replace os_config to patched_os_config and
+# package_config to patched_package_config in kwargs.
+# It tells db these fields will be patched not updated.
+@utils.replace_filters(
+ os_config='patched_os_config',
+ package_config='patched_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER_CONFIG
+)
+def patch_cluster_config(cluster_id, user=None, session=None, **kwargs):
+ """patch cluster config."""
+ cluster = _get_cluster(cluster_id, session=session)
+ return _update_cluster_config(
+ cluster, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def del_cluster_config(cluster_id, user=None, session=None):
+ """Delete a cluster config."""
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ check_cluster_editable(cluster, user=user)
+ return utils.update_db_object(
+ session, cluster, os_config={},
+ package_config={}, config_validated=False
+ )
+
+
+def _roles_validates(roles, cluster, session=None, user=None):
+ """Check roles is validated to a cluster's roles."""
+ if roles:
+ if not cluster.flavor_name:
+ raise exception.InvalidParameter(
+ 'no flavor in cluster %s' % cluster.name
+ )
+ cluster_roles = [role['name'] for role in cluster.flavor['roles']]
+ for role in roles:
+ if role not in cluster_roles:
+ raise exception.InvalidParameter(
+ 'role %s is not in cluster roles %s' % (
+ role, cluster_roles
+ )
+ )
+
+
+def _cluster_host_roles_validates(
+ value, cluster, host, session=None, user=None, **kwargs
+):
+ """Check clusterhost roles is validated by cluster and host."""
+ _roles_validates(value, cluster, session=session, user=user)
+
+
+def _clusterhost_roles_validates(
+ value, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost roles is validated by clusterhost."""
+ _roles_validates(
+ value, clusterhost.cluster, session=session, user=user
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_HOST_FIELDS,
+ ignore_support_keys=UPDATED_CLUSTERHOST_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+def _add_host_if_not_exist(
+ machine_id, cluster, session=None, user=None, **kwargs
+):
+ """Add underlying host if it does not exist."""
+ from compass.db.api import host as host_api
+ host = host_api.get_host_internal(
+ machine_id, session=session, exception_when_missing=False
+ )
+ if host:
+ if kwargs:
+ # skip updating the underlying host if it is not editable.
+ if host_api.is_host_editable(
+ host, user=cluster.creator,
+ check_in_installing=kwargs.get('reinstall_os', False),
+ ):
+ utils.update_db_object(
+ session, host,
+ **kwargs
+ )
+ else:
+ logging.debug(
+ 'ignore updating host %s '
+ 'since it is not editable' % host.name
+ )
+ else:
+ logging.debug('nothing to update for host %s', host.name)
+ else:
+ from compass.db.api import adapter_holder as adapter_api
+ adapter = adapter_api.get_adapter(
+ cluster.adapter_name, user=user, session=session
+ )
+ host = utils.add_db_object(
+ session, models.Host, False, machine_id,
+ os_name=cluster.os_name,
+ os_installer=adapter['os_installer'],
+ creator=cluster.creator,
+ **kwargs
+ )
+ return host
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=UPDATED_HOST_FIELDS
+)
+@utils.input_validates_with_args(
+ roles=_cluster_host_roles_validates
+)
+def _add_clusterhost_only(
+ cluster, host,
+ exception_when_existing=False,
+ session=None, user=None,
+ **kwargs
+):
+ """Get clusterhost only."""
+ if cluster.state.state != "UNINITIALIZED":
+ cluster.state.ready = False
+ cluster.state.state = "UNINITIALIZED"
+ cluster.state.percentage = 0.0
+ utils.update_db_object(session, cluster.state, state="UNINITIALIZED")
+
+ return utils.add_db_object(
+ session, models.ClusterHost, exception_when_existing,
+ cluster.id, host.id, **kwargs
+ )
+
+
+@utils.supported_filters(
+ ADDED_HOST_FIELDS,
+ optional_support_keys=UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+def _add_clusterhost(
+ cluster,
+ exception_when_existing=False,
+ session=None, user=None, machine_id=None, **kwargs
+):
+ """Add clusterhost and add underlying host if it does not exist."""
+ host = _add_host_if_not_exist(
+ machine_id, cluster, session=session,
+ user=user, **kwargs
+ )
+
+ return _add_clusterhost_only(
+ cluster, host, exception_when_existing=exception_when_existing,
+ session=session, user=user, **kwargs
+ )
+
+
+def _add_clusterhosts(cluster, machines, session=None, user=None):
+ """Add machines to cluster.
+
+ Args:
+ machines: list of dicts, each with clusterhost attributes to update.
+
+ Examples:
+ [{'machine_id': 1, 'name': 'host1'}]
+ """
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ if cluster.state.state == 'SUCCESSFUL':
+ cluster.state.state = 'UPDATE_PREPARING'
+ for machine_dict in machines:
+ _add_clusterhost(
+ cluster, session=session, user=user, **machine_dict
+ )
+
+
+def _remove_clusterhosts(cluster, hosts, session=None, user=None):
+ """Remove hosts from cluster.
+
+ Args:
+ hosts: list of host id.
+ """
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ utils.del_db_objects(
+ session, models.ClusterHost,
+ cluster_id=cluster.id, host_id=hosts
+ )
+
+
+def _set_clusterhosts(cluster, machines, session=None, user=None):
+ """set machines to cluster.
+
+ Args:
+ machines: list of dict which contains clusterost attr to update.
+
+ Examples:
+ [{'machine_id': 1, 'name': 'host1'}]
+ """
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ utils.del_db_objects(
+ session, models.ClusterHost,
+ cluster_id=cluster.id
+ )
+ if cluster.state.state == 'SUCCESSFUL':
+ cluster.state.state = 'UPDATE_PREPARING'
+ for machine_dict in machines:
+ _add_clusterhost(
+ cluster, True, session=session, user=user, **machine_dict
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def list_cluster_hosts(cluster_id, user=None, session=None, **filters):
+ """List clusterhosts of a cluster."""
+ cluster = _get_cluster(cluster_id, session=session)
+ return utils.list_db_objects(
+ session, models.ClusterHost, cluster_id=cluster.id,
+ **filters
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def list_clusterhosts(user=None, session=None, **filters):
+ """List all clusterhosts."""
+ return utils.list_db_objects(
+ session, models.ClusterHost, **filters
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def get_cluster_host(
+ cluster_id, host_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get clusterhost info by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session,
+ exception_when_missing=exception_when_missing,
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def get_clusterhost(
+ clusterhost_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get clusterhost info by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session,
+ exception_when_missing=exception_when_missing,
+ user=user
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def add_cluster_host(
+ cluster_id, exception_when_existing=True,
+ user=None, session=None, **kwargs
+):
+ """Add a host to a cluster."""
+ cluster = _get_cluster(cluster_id, session=session)
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ if cluster.state.state == 'SUCCESSFUL':
+ cluster.state.state = 'UPDATE_PREPARING'
+ return _add_clusterhost(
+ cluster, exception_when_existing,
+ session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_HOST_FIELDS,
+ ignore_support_keys=(
+ UPDATED_CLUSTERHOST_FIELDS +
+ PATCHED_CLUSTERHOST_FIELDS
+ )
+)
+def _update_host_if_necessary(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """Update underlying host if there is something to update."""
+ host = clusterhost.host
+ if kwargs:
+ # skip updating the underlying host if it is not editable.
+ from compass.db.api import host as host_api
+ if host_api.is_host_editable(
+ host, user=clusterhost.cluster.creator,
+ check_in_installing=kwargs.get('reinstall_os', False),
+ ):
+ utils.update_db_object(
+ session, host,
+ **kwargs
+ )
+ else:
+ logging.debug(
+ 'ignore updating host %s since it is not editable' % host.name
+ )
+ else:
+ logging.debug(
+ 'nothing to update for host %s', host.name
+ )
+ return host
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_CLUSTERHOST_FIELDS +
+ PATCHED_CLUSTERHOST_FIELDS
+ ),
+ ignore_support_keys=UPDATED_HOST_FIELDS
+)
+@utils.input_validates_with_args(
+ roles=_clusterhost_roles_validates,
+ patched_roles=_clusterhost_roles_validates
+)
+def _update_clusterhost_only(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """Update clusterhost only."""
+ check_cluster_editable(clusterhost.cluster, user=user)
+ return utils.update_db_object(
+ session, clusterhost, **kwargs
+ )
+
+
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def _update_clusterhost(clusterhost, session=None, user=None, **kwargs):
+ """Update clusterhost and underlying host if necessary."""
+ _update_host_if_necessary(
+ clusterhost, session=session, user=user, **kwargs
+ )
+ return _update_clusterhost_only(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def update_cluster_host(
+ cluster_id, host_id, user=None,
+ session=None, **kwargs
+):
+ """Update clusterhost by cluster id and host id."""
+ logging.info('updating kwargs: %s', kwargs)
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def update_clusterhost(
+ clusterhost_id, user=None,
+ session=None, **kwargs
+):
+ """Update clusterhost by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# replace roles to patched_roles in kwargs.
+# It tells db roles field will be patched.
+@utils.replace_filters(
+ roles='patched_roles'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def patch_cluster_host(
+ cluster_id, host_id, user=None,
+ session=None, **kwargs
+):
+ """Patch clusterhost by cluster id and host id."""
+ logging.info("kwargs are %s", kwargs)
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ updated_clusterhost = _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+ return updated_clusterhost
+
+
+# replace roles to patched_roles in kwargs.
+# It tells db roles field will be patched.
+@utils.replace_filters(
+ roles='patched_roles'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def patch_clusterhost(
+ clusterhost_id, user=None, session=None,
+ **kwargs
+):
+ """Patch clusterhost by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTER_HOST
+)
+@utils.wrap_to_dict(
+ RESP_CLUSTERHOST_FIELDS + ['status', 'host'],
+ host=RESP_CLUSTERHOST_FIELDS
+)
+def _del_cluster_host(
+ clusterhost,
+ force=False, from_database_only=False,
+ delete_underlying_host=False, user=None,
+ session=None, **kwargs
+):
+ """delete clusterhost.
+
+ If force, the cluster host will be deleted anyway.
+ If from_database_only, the clusterhost record will only be
+ deleted from the database. Otherwise a celery task is sent to do
+ a clean deletion.
+ If delete_underlying_host, the underlying host will also be deleted.
+ The backend will call _del_cluster_host again when the clusterhost is
+ deleted from os installer/package installer with from_database_only
+ set.
+ """
+ # force sets the clusterhost state to ERROR when we want to delete the
+ # clusterhost anyway, even if it is installing or already installed.
+ # That lets the api know deletion is in progress while the backend does
+ # the real deleting. In the future we may introduce a new state like
+ # INDELETE to indicate deletion is in progress.
+ # It is still open for discussion whether the api should allow deleting
+ # a clusterhost that is already installed.
+ if clusterhost.state.state != 'UNINITIALIZED' and force:
+ clusterhost.state.state = 'ERROR'
+ if not force:
+ check_cluster_editable(
+ clusterhost.cluster, user=user,
+ check_in_installing=True
+ )
+ # delete underlying host if delete_underlying_host is set.
+ if delete_underlying_host:
+ host = clusterhost.host
+ if host.state.state != 'UNINITIALIZED' and force:
+ host.state.state = 'ERROR'
+ # only delete the host when the user has permission to delete it.
+ import compass.db.api.host as host_api
+ if host_api.is_host_editable(
+ host, user=user,
+ check_in_installing=True
+ ):
+ # if there is no need to do the deletion by backend or
+ # from_database_only is set, we only delete the record
+ # in database.
+ if host.state.state == 'UNINITIALIZED' or from_database_only:
+ utils.del_db_object(
+ session, host
+ )
+
+ # if there is no need to do the deletion by backend or
+ # from_database_only is set, we only delete the record in database.
+ if clusterhost.state.state == 'UNINITIALIZED' or from_database_only:
+ return utils.del_db_object(
+ session, clusterhost
+ )
+ else:
+ logging.info(
+ 'send del cluster %s host %s task to celery',
+ clusterhost.cluster_id, clusterhost.host_id
+ )
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.delete_cluster_host',
+ (
+ user.email, clusterhost.cluster_id, clusterhost.host_id,
+ delete_underlying_host
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'delete action sent',
+ 'host': clusterhost,
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+def del_cluster_host(
+ cluster_id, host_id,
+ force=False, from_database_only=False,
+ delete_underlying_host=False, user=None,
+ session=None, **kwargs
+):
+ """Delete clusterhost by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _del_cluster_host(
+ clusterhost, force=force, from_database_only=from_database_only,
+ delete_underlying_host=delete_underlying_host, user=user,
+ session=session, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+def del_clusterhost(
+ clusterhost_id,
+ force=False, from_database_only=False,
+ delete_underlying_host=False, user=None,
+ session=None, **kwargs
+):
+ """Delete clusterhost by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _del_cluster_host(
+ clusterhost, force=force, from_database_only=from_database_only,
+ delete_underlying_host=delete_underlying_host, user=user,
+ session=session, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def get_cluster_host_config(
+ cluster_id, host_id, user=None,
+ session=None, **kwargs
+):
+ """Get clusterhost config by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
+def get_cluster_host_deployed_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost deployed config by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def get_clusterhost_config(clusterhost_id, user=None, session=None, **kwargs):
+ """Get clusterhost config by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
+def get_clusterhost_deployed_config(
+ clusterhost_id, user=None,
+ session=None, **kwargs
+):
+ """Get clusterhost deployed config by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ )
+
+
+def _clusterhost_os_config_validates(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Validate clusterhost's underlying host os config."""
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ host_api.check_host_editable(host, user=user)
+ metadata_api.validate_os_config(
+ config, host.os_id
+ )
+
+
+def _clusterhost_package_config_validates(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Validate clusterhost's cluster package config."""
+ cluster = clusterhost.cluster
+ check_cluster_editable(cluster, user=user)
+ metadata_api.validate_flavor_config(
+ config, cluster.flavor_id
+ )
+
+
+def _filter_clusterhost_host_editable(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Filter fields if the underlying host is not editable."""
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ return host_api.is_host_editable(host, user=user)
+
+
+@utils.input_filters(
+ put_os_config=_filter_clusterhost_host_editable,
+ patched_os_config=_filter_clusterhost_host_editable
+)
+@utils.input_validates_with_args(
+ put_os_config=_clusterhost_os_config_validates,
+ put_package_config=_clusterhost_package_config_validates
+)
+@utils.output_validates_with_args(
+ os_config=_clusterhost_os_config_validates,
+ package_config=_clusterhost_package_config_validates
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def _update_clusterhost_config(clusterhost, session=None, user=None, **kwargs):
+ """Update clusterhost config."""
+ return utils.update_db_object(
+ session, clusterhost, **kwargs
+ )
+
+
+def _clusterhost_host_validated(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost's underlying host is validated."""
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ host_api.check_host_editable(host, user=user)
+ host_api.check_host_validated(host)
+
+
+def _clusterhost_cluster_validated(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost's cluster is validated."""
+ cluster = clusterhost.cluster
+ check_cluster_editable(cluster, user=user)
+ check_clusterhost_validated(clusterhost)
+
+
+@utils.input_filters(
+ deployed_os_config=_filter_clusterhost_host_editable,
+)
+@utils.input_validates_with_args(
+ deployed_os_config=_clusterhost_host_validated,
+ deployed_package_config=_clusterhost_cluster_validated
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
+def _update_clusterhost_deployed_config(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """Update clusterhost deployed config."""
+ return utils.update_db_object(
+ session, clusterhost, **kwargs
+ )
+
+
+# replace os_config to put_os_config and
+# package_config to put_package_config in kwargs.
+# It tells db these fields will be updated not patched.
+@utils.replace_filters(
+ os_config='put_os_config',
+ package_config='put_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_cluster_host_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, user=user, session=session, **kwargs
+ )
+
+
+# replace os_config to deployed_os_config and
+# package_config to deployed_package_config in kwargs.
+@utils.replace_filters(
+ os_config='deployed_os_config',
+ package_config='deployed_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_cluster_host_deployed_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost deployed config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_deployed_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# replace os_config to put_os_config and
+# package_config to put_package_config in kwargs.
+# It tells db these fields will be updated not patched.
+@utils.replace_filters(
+ os_config='put_os_config',
+ package_config='put_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_clusterhost_config(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# replace os_config to deployed_os_config and
+# package_config to deployed_package_config in kwargs.
+@utils.replace_filters(
+ os_config='deployed_os_config',
+ package_config='deployed_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_clusterhost_deployed_config(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost deployed config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost_deployed_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# replace os_config to patched_os_config and
+# package_config to patched_package_config in kwargs
+# It tells db these fields will be patched not updated.
+@utils.replace_filters(
+ os_config='patched_os_config',
+ package_config='patched_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def patch_cluster_host_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """patch clusterhost config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# replace os_config to patched_os_config and
+# package_config to patched_package_config in kwargs
+# It tells db these fields will be patched not updated.
+@utils.replace_filters(
+ os_config='patched_os_config',
+ package_config='patched_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def patch_clusterhost_config(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """patch clusterhost config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+def _clusterhost_host_editable(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost underlying host is editable."""
+ from compass.db.api import host as host_api
+ host_api.check_host_editable(clusterhost.host, user=user)
+
+
+def _clusterhost_cluster_editable(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost's cluster is editable."""
+ check_cluster_editable(clusterhost.cluster, user=user)
+
+
+@utils.supported_filters(
+ optional_support_keys=['os_config', 'package_config']
+)
+@utils.input_filters(
+ os_config=_filter_clusterhost_host_editable,
+)
+@utils.output_validates_with_args(
+ package_config=_clusterhost_cluster_editable
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def _delete_clusterhost_config(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """delete clusterhost config."""
+ return utils.update_db_object(
+ session, clusterhost, config_validated=False,
+ **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
+)
+def delete_cluster_host_config(
+ cluster_id, host_id, user=None, session=None
+):
+ """Delete a clusterhost config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _delete_clusterhost_config(
+ clusterhost, session=session, user=user,
+ os_config={}, package_config={}
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def delete_clusterhost_config(clusterhost_id, user=None, session=None):
+ """Delet a clusterhost config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _delete_clusterhost_config(
+ clusterhost, session=session, user=user,
+ os_config={}, package_config={}
+ )
+
+
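+# each action takes a dict of kwargs for the underlying helper, e.g.
+# add_hosts={'machines': [{'machine_id': 1}]} and remove_hosts={'hosts': [1]}
+# (illustrative values).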
+@utils.supported_filters(
+ optional_support_keys=['add_hosts', 'remove_hosts', 'set_hosts']
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+@utils.wrap_to_dict(
+ ['hosts'],
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def update_cluster_hosts(
+ cluster_id, add_hosts={}, set_hosts=None,
+ remove_hosts={}, user=None, session=None
+):
+ """Update cluster hosts."""
+ cluster = _get_cluster(cluster_id, session=session)
+ if remove_hosts:
+ _remove_clusterhosts(
+ cluster, session=session, user=user, **remove_hosts
+ )
+ if add_hosts:
+ _add_clusterhosts(
+ cluster, session=session, user=user, **add_hosts
+ )
+ if set_hosts is not None:
+ _set_clusterhosts(
+ cluster, session=session, user=user, **set_hosts
+ )
+
+ return {
+ 'hosts': list_cluster_hosts(cluster_id, session=session)
+ }
+
+
+def validate_clusterhost(clusterhost, session=None):
+ """validate clusterhost."""
+ roles = clusterhost.roles
+ if not roles:
+ if clusterhost.cluster.flavor_name:
+ raise exception.InvalidParameter(
+ 'empty roles for clusterhost %s' % clusterhost.name
+ )
+
+
+def validate_cluster(cluster, session=None):
+ """Validate cluster."""
+ if not cluster.clusterhosts:
+ raise exception.InvalidParameter(
+ 'cluster %s does not have any hosts' % cluster.name
+ )
+ if cluster.flavor_name:
+ cluster_roles = cluster.flavor['roles']
+ else:
+ cluster_roles = []
+ necessary_roles = set([
+ role['name'] for role in cluster_roles if not role.get('optional')
+ ])
+ clusterhost_roles = set([])
+ interface_subnets = {}
+ for clusterhost in cluster.clusterhosts:
+ roles = clusterhost.roles
+ for role in roles:
+ clusterhost_roles.add(role['name'])
+ host = clusterhost.host
+ for host_network in host.host_networks:
+ interface_subnets.setdefault(
+ host_network.interface, set([])
+ ).add(host_network.subnet.subnet)
+ missing_roles = necessary_roles - clusterhost_roles
+ if missing_roles:
+ raise exception.InvalidParameter(
+ 'cluster %s has roles %s not assigned to any host' % (
+ cluster.name, list(missing_roles)
+ )
+ )
+ for interface, subnets in interface_subnets.items():
+ if len(subnets) > 1:
+ raise exception.InvalidParameter(
+ 'cluster %s has multiple subnets %s on interface %s' % (
+ cluster.name, list(subnets), interface
+ )
+ )
+
+
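+# review={'hosts': [...]} and/or review={'clusterhosts': [...]} select the
+# clusterhosts to re-validate; an empty review still validates the cluster
+# level config but no hosts.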
+@utils.supported_filters(optional_support_keys=['review'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_REVIEW_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_REVIEW_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_CONFIG_FIELDS
+)
+def review_cluster(cluster_id, review={}, user=None, session=None, **kwargs):
+ """review cluster.
+
+ Args:
+ cluster_id: the cluster id.
+ review: dict of hosts to be reviewed. It contains the key
+ hosts and/or clusterhosts, where hosts is a list of host
+ ids and clusterhosts is a list of clusterhost ids.
+ """
+ from compass.db.api import host as host_api
+ cluster = _get_cluster(cluster_id, session=session)
+ check_cluster_editable(cluster, user=user)
+ host_ids = review.get('hosts', [])
+ clusterhost_ids = review.get('clusterhosts', [])
+ clusterhosts = []
+ # Get clusterhosts need to be reviewed.
+ for clusterhost in cluster.clusterhosts:
+ if (
+ clusterhost.clusterhost_id in clusterhost_ids or
+ clusterhost.host_id in host_ids
+ ):
+ clusterhosts.append(clusterhost)
+
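+ # autofill and validate the cluster level os config before merging it
+ # with each host's own os config.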
+ os_config = copy.deepcopy(cluster.os_config)
+ os_config = metadata_api.autofill_os_config(
+ os_config, cluster.os_id, cluster=cluster
+ )
+ metadata_api.validate_os_config(
+ os_config, cluster.os_id, True
+ )
+ for clusterhost in clusterhosts:
+ host = clusterhost.host
+ # ignore underlying host os config validation
+ # since the host is not editable
+ if not host_api.is_host_editable(
+ host, user=user, check_in_installing=False
+ ):
+ logging.info(
+ 'ignore updating host %s config '
+ 'since it is not editable' % host.name
+ )
+ continue
+ host_os_config = copy.deepcopy(host.os_config)
+ host_os_config = metadata_api.autofill_os_config(
+ host_os_config, host.os_id,
+ host=host
+ )
+ deployed_os_config = util.merge_dict(
+ os_config, host_os_config
+ )
+ metadata_api.validate_os_config(
+ deployed_os_config, host.os_id, True
+ )
+ host_api.validate_host(host)
+ utils.update_db_object(
+ session, host, os_config=host_os_config, config_validated=True
+ )
+
+ package_config = copy.deepcopy(cluster.package_config)
+ if cluster.flavor_name:
+ package_config = metadata_api.autofill_flavor_config(
+ package_config, cluster.flavor_id,
+ cluster=cluster
+ )
+ metadata_api.validate_flavor_config(
+ package_config, cluster.flavor_id, True
+ )
+ for clusterhost in clusterhosts:
+ clusterhost_package_config = copy.deepcopy(
+ clusterhost.package_config
+ )
+ clusterhost_package_config = (
+ metadata_api.autofill_flavor_config(
+ clusterhost_package_config,
+ cluster.flavor_id,
+ clusterhost=clusterhost
+ )
+ )
+ deployed_package_config = util.merge_dict(
+ package_config, clusterhost_package_config
+ )
+ metadata_api.validate_flavor_config(
+ deployed_package_config,
+ cluster.flavor_id, True
+ )
+ validate_clusterhost(clusterhost, session=session)
+ utils.update_db_object(
+ session, clusterhost,
+ package_config=clusterhost_package_config,
+ config_validated=True
+ )
+
+ validate_cluster(cluster, session=session)
+ utils.update_db_object(
+ session, cluster, os_config=os_config, package_config=package_config,
+ config_validated=True
+ )
+ return {
+ 'cluster': cluster,
+ 'hosts': clusterhosts
+ }
+
+
+@utils.supported_filters(optional_support_keys=['deploy'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def deploy_cluster(
+ cluster_id, deploy={}, user=None, session=None, **kwargs
+):
+ """deploy cluster.
+
+ Args:
+ cluster_id: cluster id.
+ deploy: dict with the key hosts and/or clusterhosts, where
+ deploy['hosts'] is a list of host ids and
+ deploy['clusterhosts'] is a list of clusterhost ids.
+ """
+ from compass.db.api import host as host_api
+ from compass.tasks import client as celery_client
+ cluster = _get_cluster(cluster_id, session=session)
+ host_ids = deploy.get('hosts', [])
+ clusterhost_ids = deploy.get('clusterhosts', [])
+ clusterhosts = []
+ # get clusterhost to deploy.
+ for clusterhost in cluster.clusterhosts:
+ if (
+ clusterhost.clusterhost_id in clusterhost_ids or
+ clusterhost.host_id in host_ids
+ ):
+ clusterhosts.append(clusterhost)
+ check_cluster_editable(cluster, user=user)
+ check_cluster_validated(cluster)
+ utils.update_db_object(session, cluster.state, state='INITIALIZED')
+ for clusterhost in clusterhosts:
+ host = clusterhost.host
+ # ignore checking if underlying host is validated if
+ # the host is not editable.
+ if host_api.is_host_editable(host, user=user):
+ host_api.check_host_validated(host)
+ utils.update_db_object(session, host.state, state='INITIALIZED')
+ if cluster.flavor_name:
+ check_clusterhost_validated(clusterhost)
+ utils.update_db_object(
+ session, clusterhost.state, state='INITIALIZED'
+ )
+
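+ # hand the actual deployment off to the backend through the
+ # user-scoped celery queue.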
+ celery_client.celery.send_task(
+ 'compass.tasks.deploy_cluster',
+ (
+ user.email, cluster_id,
+ [clusterhost.host_id for clusterhost in clusterhosts]
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'deploy action sent',
+ 'cluster': cluster,
+ 'hosts': clusterhosts
+ }
+
+
+@utils.supported_filters(optional_support_keys=['redeploy'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def redeploy_cluster(
+ cluster_id, deploy={}, user=None, session=None, **kwargs
+):
+ """redeploy cluster.
+
+ Args:
+ cluster_id: cluster id.
+ """
+ from compass.db.api import host as host_api
+ from compass.tasks import client as celery_client
+ cluster = _get_cluster(cluster_id, session=session)
+
+ check_cluster_editable(cluster, user=user)
+ check_cluster_validated(cluster)
+ utils.update_db_object(
+ session, cluster.state,
+ state='INITIALIZED',
+ percentage=0,
+ ready=False
+ )
+ for clusterhost in cluster.clusterhosts:
+ host = clusterhost.host
+ # unlike deploy_cluster, redeploy requires every underlying host
+ # to be validated, editable or not.
+ host_api.check_host_validated(host)
+ utils.update_db_object(
+ session, host.state,
+ state='INITIALIZED',
+ percentage=0,
+ ready=False
+ )
+ if cluster.flavor_name:
+ check_clusterhost_validated(clusterhost)
+ utils.update_db_object(
+ session,
+ clusterhost.state,
+ state='INITIALIZED',
+ percentage=0,
+ ready=False
+ )
+
+ celery_client.celery.send_task(
+ 'compass.tasks.redeploy_cluster',
+ (
+ user.email, cluster_id
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'redeploy action sent',
+ 'cluster': cluster
+ }
+
+
+@utils.supported_filters(optional_support_keys=['apply_patch'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def patch_cluster(cluster_id, user=None, session=None, **kwargs):
+ """patch cluster: send a patch task to the celery backend."""
+ from compass.tasks import client as celery_client
+
+ cluster = _get_cluster(cluster_id, session=session)
+ celery_client.celery.send_task(
+ 'compass.tasks.patch_cluster',
+ (
+ user.email, cluster_id,
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'patch action sent',
+ 'cluster': cluster
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTER_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def get_cluster_state(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster state info."""
+ return _get_cluster(cluster_id, session=session).state_dict()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_cluster_host_state(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost state merged with underlying host state."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ ).state_dict()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_cluster_host_self_state(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost itself state."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ ).state
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_clusterhost_state(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost state merged with underlying host state."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ ).state_dict()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_clusterhost_self_state(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost itself state."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ ).state
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def update_cluster_host_state(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Update a clusterhost itself state."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ # Modify(harry): progress_update.py no longer updates the cluster state,
+ # so update the cluster state here as well.
+ cluster = _get_cluster(clusterhost.cluster_id, session=session)
+ utils.update_db_object(session, clusterhost.state, **kwargs)
+ utils.update_db_object(session, cluster.state, **kwargs)
+ return clusterhost.state_dict()
+
+
+def _update_clusterhost_state(
+ clusterhost, from_database_only=False,
+ session=None, user=None, **kwargs
+):
+ """Update clusterhost state.
+
+ If from_database_only, the state will only be updated in database.
+ Otherwise a task is sent to celery, and the os installer/package
+ installer will also update its state if needed.
+ """
+ if 'ready' in kwargs and kwargs['ready'] and not clusterhost.state.ready:
+ ready_triggered = True
+ else:
+ ready_triggered = False
+ cluster_ready = False
+ host = clusterhost.host
+ cluster = clusterhost.cluster
+ host_ready = not host.state.ready
+ if ready_triggered:
+ cluster_ready = True
+ for clusterhost_in_cluster in cluster.clusterhosts:
+ if (
+ clusterhost_in_cluster.clusterhost_id
+ == clusterhost.clusterhost_id
+ ):
+ continue
+ if not clusterhost_in_cluster.state.ready:
+ cluster_ready = False
+
+ logging.info(
+ 'clusterhost %s ready: %s',
+ clusterhost.name, ready_triggered
+ )
+ logging.info('cluster ready: %s', cluster_ready)
+ logging.info('host ready: %s', host_ready)
+ if not ready_triggered or from_database_only:
+ logging.info('%s state is set to %s', clusterhost.name, kwargs)
+ utils.update_db_object(session, clusterhost.state, **kwargs)
+ if not clusterhost.state.ready:
+ logging.info('%s state ready is set to False', cluster.name)
+ utils.update_db_object(session, cluster.state, ready=False)
+ status = '%s state is updated' % clusterhost.name
+ else:
+ if not user:
+ user_id = cluster.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.package_installed',
+ (
+ clusterhost.cluster_id, clusterhost.host_id,
+ cluster_ready, host_ready
+ ),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ status = '%s: cluster ready %s host ready %s' % (
+ clusterhost.name, cluster_ready, host_ready
+ )
+ logging.info('action status: %s', status)
+ return {
+ 'status': status,
+ 'clusterhost': clusterhost.state_dict()
+ }
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(['status', 'clusterhost'])
+def update_cluster_host_state_internal(
+ cluster_id, host_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a clusterhost state by installation process."""
+ # TODO(xicheng): it should be merged into update_cluster_host_state
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_state(
+ clusterhost, from_database_only=from_database_only,
+ session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def update_clusterhost_state(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Update a clusterhost itself state."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ # Modify(harry): progress_update.py no longer updates the cluster state,
+ # so update the cluster state here as well.
+ cluster = _get_cluster(clusterhost.cluster_id, session=session)
+ utils.update_db_object(session, clusterhost.state, **kwargs)
+ utils.update_db_object(session, cluster.state, **kwargs)
+ return clusterhost.state_dict()
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(['status', 'clusterhost'])
+def update_clusterhost_state_internal(
+ clusterhost_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a clusterhost state by installation process."""
+ # TODO(xicheng): it should be merged into update_clusterhost_state
+ clusterhost = _get_clusterhost(clusterhost_id, session=session)
+ return _update_clusterhost_state(
+ clusterhost, from_database_only=from_database_only,
+ session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTER_STATE_FIELDS,
+ ignore_support_keys=(IGNORE_FIELDS + IGNORE_UPDATED_CLUSTER_STATE_FIELDS)
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def update_cluster_state(
+ cluster_id, user=None, session=None, **kwargs
+):
+ """Update a cluster state."""
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ utils.update_db_object(session, cluster.state, **kwargs)
+ return cluster.state_dict()
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTER_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_STATE
+)
+@utils.wrap_to_dict(['status', 'cluster'])
+def update_cluster_state_internal(
+ cluster_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a cluster state by installation process.
+
+ If from_database_only, the state will only be updated in database.
+ Otherwise a task is sent to update the state in the os installer
+ and package installer.
+ """
+ # TODO(xicheng): it should be merged into update_cluster_state
+ cluster = _get_cluster(cluster_id, session=session)
+ if 'ready' in kwargs and kwargs['ready'] and not cluster.state.ready:
+ ready_triggered = True
+ else:
+ ready_triggered = False
+ clusterhost_ready = {}
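+ # record which clusterhosts still need their ready flag set by the
+ # backend once the cluster itself is marked ready.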
+ if ready_triggered:
+ for clusterhost in cluster.clusterhosts:
+ clusterhost_ready[clusterhost.host_id] = (
+ not clusterhost.state.ready
+ )
+
+ logging.info('cluster %s ready: %s', cluster_id, ready_triggered)
+ logging.info('clusterhost ready: %s', clusterhost_ready)
+
+ if not ready_triggered or from_database_only:
+ logging.info('%s state is set to %s', cluster.name, kwargs)
+ utils.update_db_object(session, cluster.state, **kwargs)
+ if not cluster.state.ready:
+ for clusterhost in cluster.clusterhosts:
+ logging.info('%s state ready is set to False', clusterhost.name)
+ utils.update_db_object(
+ session, clusterhost.state, ready=False
+ )
+ status = '%s state is updated' % cluster.name
+ else:
+ if not user:
+ user_id = cluster.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.cluster_installed',
+ (cluster_id, clusterhost_ready),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ status = '%s installed action set clusterhost ready %s' % (
+ cluster.name, clusterhost_ready
+ )
+ logging.info('action status: %s', status)
+ return {
+ 'status': status,
+ 'cluster': cluster.state_dict()
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_cluster_host_log_histories(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost log history by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ ).log_histories
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_clusterhost_log_histories(
+ clusterhost_id, user=None,
+ session=None, **kwargs
+):
+ """Get clusterhost log history by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ ).log_histories
+
+
+def _get_cluster_host_log_history(
+ cluster_id, host_id, filename, session=None, **kwargs
+):
+ """Get clusterhost log history by cluster id, host id and filename."""
+ clusterhost = _get_cluster_host(cluster_id, host_id, session=session)
+ return utils.get_db_object(
+ session, models.ClusterHostLogHistory,
+ clusterhost_id=clusterhost.clusterhost_id, filename=filename,
+ **kwargs
+ )
+
+
+def _get_clusterhost_log_history(
+ clusterhost_id, filename, session=None, **kwargs
+):
+ """Get clusterhost log history by clusterhost id and filename."""
+ clusterhost = _get_clusterhost(clusterhost_id, session=session)
+ return utils.get_db_object(
+ session, models.ClusterHostLogHistory,
+ clusterhost_id=clusterhost.clusterhost_id, filename=filename,
+ **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_cluster_host_log_history(
+ cluster_id, host_id, filename, user=None, session=None, **kwargs
+):
+ """Get clusterhost log history by cluster id, host id and filename."""
+ return _get_cluster_host_log_history(
+ cluster_id, host_id, filename, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_clusterhost_log_history(
+ clusterhost_id, filename, user=None, session=None, **kwargs
+):
+ """Get host log history by clusterhost id and filename."""
+ return _get_clusterhost_log_history(
+ clusterhost_id, filename, session=session
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def update_cluster_host_log_history(
+ cluster_id, host_id, filename, user=None, session=None, **kwargs
+):
+ """Update a host log history by cluster id, host id and filename."""
+ cluster_host_log_history = _get_cluster_host_log_history(
+ cluster_id, host_id, filename, session=session
+ )
+ return utils.update_db_object(
+ session, cluster_host_log_history, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def update_clusterhost_log_history(
+ clusterhost_id, filename, user=None, session=None, **kwargs
+):
+ """Update a host log history by clusterhost id and filename."""
+ clusterhost_log_history = _get_clusterhost_log_history(
+ clusterhost_id, filename, session=session
+ )
+ return utils.update_db_object(session, clusterhost_log_history, **kwargs)
+
+
+@utils.supported_filters(
+ ADDED_CLUSTERHOST_LOG_FIELDS,
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def add_clusterhost_log_history(
+ clusterhost_id, exception_when_existing=False,
+ filename=None, user=None, session=None, **kwargs
+):
+ """add a host log history by clusterhost id and filename."""
+ clusterhost = _get_clusterhost(clusterhost_id, session=session)
+ return utils.add_db_object(
+ session, models.ClusterHostLogHistory,
+ exception_when_existing,
+ clusterhost.clusterhost_id, filename, **kwargs
+ )
+
+
+@utils.supported_filters(
+ ADDED_CLUSTERHOST_LOG_FIELDS,
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def add_cluster_host_log_history(
+ cluster_id, host_id, exception_when_existing=False,
+ filename=None, user=None, session=None, **kwargs
+):
+ """add a host log history by cluster id, host id and filename."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return utils.add_db_object(
+ session, models.ClusterHostLogHistory, exception_when_existing,
+ clusterhost.clusterhost_id, filename, **kwargs
+ )
diff --git a/compass-deck/db/api/database.py b/compass-deck/db/api/database.py
new file mode 100644
index 0000000..49769d7
--- /dev/null
+++ b/compass-deck/db/api/database.py
@@ -0,0 +1,264 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provider interface to manipulate database."""
+import functools
+import logging
+import netaddr
+
+from contextlib import contextmanager
+from sqlalchemy import create_engine
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.exc import OperationalError
+from sqlalchemy.orm import scoped_session
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import NullPool
+from sqlalchemy.pool import QueuePool
+from sqlalchemy.pool import SingletonThreadPool
+from sqlalchemy.pool import StaticPool
+from threading import local
+
+from compass.db import exception
+from compass.db import models
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+ENGINE = None
+SESSION = sessionmaker(autocommit=False, autoflush=False)
+SCOPED_SESSION = None
+SESSION_HOLDER = local()
+
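+# map setting.SQLALCHEMY_DATABASE_POOL_TYPE values to the sqlalchemy
+# pool classes used by init() below.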
+POOL_MAPPING = {
+ 'instant': NullPool,
+ 'static': StaticPool,
+ 'queued': QueuePool,
+ 'thread_single': SingletonThreadPool
+}
+
+
+def init(database_url=None):
+ """Initialize database.
+
+ Adjust sqlalchemy logging if necessary.
+
+ :param database_url: string, database url.
+ """
+ global ENGINE
+ global SCOPED_SESSION
+ if not database_url:
+ database_url = setting.SQLALCHEMY_DATABASE_URI
+ logging.info('init database %s', database_url)
+ root_logger = logging.getLogger()
+ fine_debug = root_logger.isEnabledFor(logsetting.LOGLEVEL_MAPPING['fine'])
+ if fine_debug:
+ logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
+ finest_debug = root_logger.isEnabledFor(
+ logsetting.LOGLEVEL_MAPPING['finest']
+ )
+ if finest_debug:
+ logging.getLogger('sqlalchemy.dialects').setLevel(logging.INFO)
+ logging.getLogger('sqlalchemy.pool').setLevel(logging.INFO)
+ logging.getLogger('sqlalchemy.orm').setLevel(logging.INFO)
+ poolclass = POOL_MAPPING[setting.SQLALCHEMY_DATABASE_POOL_TYPE]
+ ENGINE = create_engine(
+ database_url, convert_unicode=True,
+ poolclass=poolclass
+ )
+ SESSION.configure(bind=ENGINE)
+ SCOPED_SESSION = scoped_session(SESSION)
+ models.BASE.query = SCOPED_SESSION.query_property()
+
+
+def in_session():
+ """check if in database session scope."""
+ return bool(hasattr(SESSION_HOLDER, 'session'))
+
+
+@contextmanager
+def session(exception_when_in_session=True):
+ """database session scope.
+
+ Database operations should be performed inside a database session.
+ If exception_when_in_session is False, the with-session statement
+ supports nesting, and only the outermost session commits/rolls back
+ the transaction.
+ """
+ if not ENGINE:
+ init()
+
+ nested_session = False
+ if hasattr(SESSION_HOLDER, 'session'):
+ if exception_when_in_session:
+ logging.error('we are already in session')
+ raise exception.DatabaseException('session already exists')
+ else:
+ new_session = SESSION_HOLDER.session
+ nested_session = True
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'reuse session %s', new_session
+ )
+ else:
+ new_session = SCOPED_SESSION()
+ setattr(SESSION_HOLDER, 'session', new_session)
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'enter session %s', new_session
+ )
+ try:
+ yield new_session
+ if not nested_session:
+ new_session.commit()
+ except Exception as error:
+ if not nested_session:
+ new_session.rollback()
+ logging.error('failed to commit session')
+ logging.exception(error)
+            if isinstance(error, IntegrityError):
+                object_name = 'unknown object'
+                for item in error.statement.split():
+                    if item.islower():
+                        object_name = item
+                        break
+                raise exception.DuplicatedRecord(
+                    '%s in %s' % (error.orig, object_name)
+                )
+ elif isinstance(error, OperationalError):
+ raise exception.DatabaseException(
+ 'operation error in database'
+ )
+ elif isinstance(error, exception.DatabaseException):
+ raise error
+ else:
+ raise exception.DatabaseException(str(error))
+ finally:
+ if not nested_session:
+ new_session.close()
+ SCOPED_SESSION.remove()
+ delattr(SESSION_HOLDER, 'session')
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'exit session %s', new_session
+ )
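+
+# Illustrative usage (not part of the original module): the outermost
+# scope commits on success and rolls back on any exception; with
+# exception_when_in_session=False an inner scope reuses the same session
+# and defers commit/rollback to the outermost scope.
+#
+#     with session() as outer:
+#         outer.query(models.Host).all()
+#         with session(exception_when_in_session=False) as inner:
+#             assert inner is outer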
+
+
+def current_session():
+ """Get the current session scope when it is called.
+
+ :return: database session.
+ :raises: DatabaseException when it is not in session.
+ """
+ try:
+ return SESSION_HOLDER.session
+ except Exception as error:
+ logging.error('It is not in the session scope')
+ logging.exception(error)
+ if isinstance(error, exception.DatabaseException):
+ raise error
+ else:
+ raise exception.DatabaseException(str(error))
+
+
+def run_in_session(exception_when_in_session=True):
+    """Decorator to make sure the decorated function runs in a session.
+
+    When exception_when_in_session is False, run_in_session decorators
+    can be stacked several times on the same call path.
+    """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ my_session = kwargs.get('session')
+ if my_session is not None:
+ return func(*args, **kwargs)
+ else:
+ with session(
+ exception_when_in_session=exception_when_in_session
+ ) as my_session:
+ kwargs['session'] = my_session
+ return func(*args, **kwargs)
+ except Exception as error:
+ logging.error(
+ 'got exception with func %s args %s kwargs %s',
+ func, args, kwargs
+ )
+ logging.exception(error)
+ raise error
+ return wrapper
+ return decorator
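+
+# Minimal sketch of the intended usage (get_host_count is a hypothetical
+# example, not part of this module): the wrapped function always receives
+# an open session through the `session` keyword, whether or not the
+# caller supplied one.
+#
+#     @run_in_session()
+#     def get_host_count(session=None):
+#         return session.query(models.Host).count()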
+
+
+def _setup_user_table(user_session):
+ """Initialize user table with default user."""
+ logging.info('setup user table')
+ from compass.db.api import user
+ user.add_user(
+ session=user_session,
+ email=setting.COMPASS_ADMIN_EMAIL,
+ password=setting.COMPASS_ADMIN_PASSWORD,
+ is_admin=True
+ )
+
+
+def _setup_permission_table(permission_session):
+ """Initialize permission table."""
+ logging.info('setup permission table.')
+ from compass.db.api import permission
+ permission.add_permissions_internal(
+ session=permission_session
+ )
+
+
+def _setup_switch_table(switch_session):
+ """Initialize switch table."""
+ # TODO(xicheng): deprecate setup default switch.
+ logging.info('setup switch table')
+ from compass.db.api import switch
+ switch.add_switch(
+ True, setting.DEFAULT_SWITCH_IP,
+ session=switch_session,
+ machine_filters=['allow ports all']
+ )
+
+
+def _update_others(other_session):
+ """Update other tables."""
+ logging.info('update other tables')
+ from compass.db.api import utils
+ from compass.db import models
+ utils.update_db_objects(
+ other_session, models.Cluster
+ )
+ utils.update_db_objects(
+ other_session, models.Host
+ )
+ utils.update_db_objects(
+ other_session, models.ClusterHost
+ )
+
+
+@run_in_session()
+def create_db(session=None):
+ """Create database."""
+ models.BASE.metadata.create_all(bind=ENGINE)
+ _setup_permission_table(session)
+ _setup_user_table(session)
+ _setup_switch_table(session)
+ _update_others(session)
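+
+# Typical bootstrap sequence (illustrative; the database url below is a
+# placeholder): init() wires the engine from settings or an explicit url,
+# then create_db() creates all tables and seeds the default permissions,
+# admin user and switch inside a single session.
+#
+#     init('sqlite://')
+#     create_db()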
+
+
+def drop_db():
+ """Drop database."""
+ models.BASE.metadata.drop_all(bind=ENGINE)
diff --git a/compass-deck/db/api/health_check_report.py b/compass-deck/db/api/health_check_report.py
new file mode 100644
index 0000000..aaea7a7
--- /dev/null
+++ b/compass-deck/db/api/health_check_report.py
@@ -0,0 +1,190 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cluster health check report."""
+import logging
+
+from compass.db.api import cluster as cluster_api
+from compass.db.api import database
+from compass.db.api import host as host_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+
+REQUIRED_INSERT_FIELDS = ['name']
+OPTIONAL_INSERT_FIELDS = [
+ 'display_name', 'report', 'category', 'state', 'error_message'
+]
+UPDATE_FIELDS = ['report', 'state', 'error_message']
+RESP_FIELDS = [
+ 'cluster_id', 'name', 'display_name', 'report',
+ 'category', 'state', 'error_message'
+]
+RESP_ACTION_FIELDS = ['cluster_id', 'status']
+
+
+@utils.supported_filters(REQUIRED_INSERT_FIELDS, OPTIONAL_INSERT_FIELDS)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_report_record(cluster_id, name=None, report={},
+ state='verifying', session=None, **kwargs):
+ """Create a health check report record."""
+    # Replace any whitespace with '-'.
+ words = name.split()
+ name = '-'.join(words)
+ cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+ return utils.add_db_object(
+ session, models.HealthCheckReport, True, cluster.id, name,
+ report=report, state=state, **kwargs
+ )
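+
+# Illustrative calls (values hypothetical): whitespace in the report name
+# is normalized to '-', so the record created below is addressed as
+# 'disk-io-check' afterwards.
+#
+#     add_report_record(1, name='disk io check', report={'result': 'ok'})
+#     update_report(1, 'disk-io-check', state='finished')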
+
+
+def _get_report(cluster_id, name, session=None):
+ cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+ return utils.get_db_object(
+ session, models.HealthCheckReport, cluster_id=cluster.id, name=name
+ )
+
+
+@utils.supported_filters(UPDATE_FIELDS)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_report(cluster_id, name, session=None, **kwargs):
+ """Update health check report."""
+ report = _get_report(cluster_id, name, session=session)
+ if report.state == 'finished':
+ err_msg = 'Report cannot be updated if state is in "finished"'
+ raise exception.Forbidden(err_msg)
+
+ return utils.update_db_object(session, report, **kwargs)
+
+
+@utils.supported_filters(UPDATE_FIELDS)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_multi_reports(cluster_id, session=None, **kwargs):
+ """Bulk update reports."""
+    # TODO(grace): rename the function if needed to reflect what it does.
+ return set_error(cluster_id, session=session, **kwargs)
+
+
+def set_error(cluster_id, report={}, session=None,
+              state='error', error_message=None):
+    """Set state and error message on all reports in a cluster."""
+    cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+    logging.debug(
+        "update all reports to state %s in cluster %s",
+        state, cluster_id
+    )
+    return utils.update_db_objects(
+        session, models.HealthCheckReport,
+        updates={
+            'report': report,
+            'state': state,
+            'error_message': error_message
+        }, cluster_id=cluster.id
+    )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HEALTH_REPORT
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_health_reports(cluster_id, user=None, session=None):
+ """List all reports in the specified cluster."""
+ cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+ return utils.list_db_objects(
+ session, models.HealthCheckReport, cluster_id=cluster.id
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_HEALTH_REPORT
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_health_report(cluster_id, name, user=None, session=None):
+ return _get_report(
+ cluster_id, name, session=session
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DELETE_REPORT
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def delete_reports(cluster_id, name=None, user=None, session=None):
+ # TODO(grace): better to separate this function into two.
+ # One is to delete a report of a cluster, the other to delete all
+ # reports under a cluster.
+ if name:
+ report = _get_report(cluster_id, name, session=session)
+ return utils.del_db_object(session, report)
+ else:
+ cluster = cluster_api.get_cluster_internal(
+ cluster_id, session=session
+ )
+ return utils.del_db_objects(
+ session, models.HealthCheckReport, cluster_id=cluster.id
+ )
+
+
+@utils.supported_filters(optional_support_keys=['check_health'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_CHECK_CLUSTER_HEALTH
+)
+@utils.wrap_to_dict(RESP_ACTION_FIELDS)
+def start_check_cluster_health(cluster_id, send_report_url,
+ user=None, session=None, check_health={}):
+ """Start to check cluster health."""
+ cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+
+ if cluster.state.state != 'SUCCESSFUL':
+        logging.debug("cluster state is %s", cluster.state.state)
+        err_msg = "Health check can start only after deployment finishes!"
+ raise exception.Forbidden(err_msg)
+
+ reports = utils.list_db_objects(
+ session, models.HealthCheckReport,
+ cluster_id=cluster.id, state='verifying'
+ )
+ if reports:
+ err_msg = 'Healthcheck in progress, please wait for it to complete!'
+ raise exception.Forbidden(err_msg)
+
+    # Clear all previous reports.
+ # TODO(grace): the delete should be moved into celery task.
+ # We should consider the case that celery task is down.
+ utils.del_db_objects(
+ session, models.HealthCheckReport, cluster_id=cluster.id
+ )
+
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.cluster_health',
+ (cluster.id, send_report_url, user.email),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ "cluster_id": cluster.id,
+ "status": "start to check cluster health."
+ }
diff --git a/compass-deck/db/api/host.py b/compass-deck/db/api/host.py
new file mode 100644
index 0000000..15e0bb6
--- /dev/null
+++ b/compass-deck/db/api/host.py
@@ -0,0 +1,1120 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Host database operations."""
+import functools
+import logging
+import netaddr
+import re
+
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac', 'id']
+SUPPORTED_MACHINE_HOST_FIELDS = [
+ 'mac', 'tag', 'location', 'os_name', 'os_id'
+]
+SUPPORTED_NETWORK_FIELDS = [
+ 'interface', 'ip', 'is_mgmt', 'is_promiscuous'
+]
+RESP_FIELDS = [
+ 'id', 'name', 'hostname', 'os_name', 'owner', 'mac',
+ 'switch_ip', 'port', 'switches', 'os_installer', 'os_id', 'ip',
+ 'reinstall_os', 'os_installed', 'tag', 'location', 'networks',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTER_FIELDS = [
+ 'id', 'name', 'os_name', 'reinstall_distributed_system',
+ 'owner', 'adapter_name', 'flavor_name',
+ 'distributed_system_installed', 'created_at', 'updated_at'
+]
+RESP_NETWORK_FIELDS = [
+ 'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous',
+ 'created_at', 'updated_at'
+]
+RESP_CONFIG_FIELDS = [
+ 'os_config',
+ 'config_setp',
+ 'config_validated',
+ 'networks',
+ 'created_at',
+ 'updated_at'
+]
+RESP_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config'
+]
+RESP_DEPLOY_FIELDS = [
+ 'status', 'host'
+]
+UPDATED_FIELDS = ['name', 'reinstall_os']
+UPDATED_CONFIG_FIELDS = [
+ 'put_os_config'
+]
+PATCHED_CONFIG_FIELDS = [
+ 'patched_os_config'
+]
+UPDATED_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config'
+]
+ADDED_NETWORK_FIELDS = [
+ 'interface', 'ip', 'subnet_id'
+]
+OPTIONAL_ADDED_NETWORK_FIELDS = ['is_mgmt', 'is_promiscuous']
+UPDATED_NETWORK_FIELDS = [
+ 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
+ 'is_promiscuous'
+]
+IGNORE_FIELDS = [
+ 'id', 'created_at', 'updated_at'
+]
+RESP_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity', 'ready'
+]
+UPDATED_STATE_FIELDS = [
+ 'state', 'percentage', 'message', 'severity'
+]
+UPDATED_STATE_INTERNAL_FIELDS = [
+ 'ready'
+]
+RESP_LOG_FIELDS = [
+ 'id', 'filename', 'position', 'partial_line', 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+ADDED_LOG_FIELDS = [
+ 'filename'
+]
+UPDATED_LOG_FIELDS = [
+ 'position', 'partial_line', 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOSTS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_hosts(user=None, session=None, **filters):
+ """List hosts."""
+ return utils.list_db_objects(
+ session, models.Host, **filters
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_MACHINE_HOST_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOSTS
+)
+@utils.output_filters(
+ missing_ok=True,
+ tag=utils.general_filter_callback,
+ location=utils.general_filter_callback,
+ os_name=utils.general_filter_callback,
+ os_id=utils.general_filter_callback
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_machines_or_hosts(user=None, session=None, **filters):
+ """List machines or hosts if possible."""
+ machines = utils.list_db_objects(
+ session, models.Machine, **filters
+ )
+ machines_or_hosts = []
+ for machine in machines:
+ host = machine.host
+ if host:
+ machines_or_hosts.append(host)
+ else:
+ machines_or_hosts.append(machine)
+ return machines_or_hosts
+
+
+def _get_host(host_id, session=None, **kwargs):
+ """Get host by id."""
+ if isinstance(host_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Host,
+ id=host_id, **kwargs
+ )
+ else:
+ raise exception.InvalidParameter(
+ 'host id %s type is not int compatible' % host_id
+ )
+
+
+def get_host_internal(host_id, session=None, **kwargs):
+ """Helper function to get host.
+
+ Used by other files under db/api.
+ """
+ return _get_host(host_id, session=session, **kwargs)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOSTS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_host(
+ host_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get host info."""
+ return _get_host(
+ host_id,
+ exception_when_missing=exception_when_missing,
+ session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOSTS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_machine_or_host(
+ host_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get machine or host if possible."""
+ from compass.db.api import machine as machine_api
+ machine = machine_api.get_machine_internal(
+ host_id,
+ exception_when_missing=exception_when_missing,
+ session=session
+ )
+ if machine.host:
+ return machine.host
+ else:
+ return machine
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_CLUSTERS
+)
+@utils.wrap_to_dict(RESP_CLUSTER_FIELDS)
+def get_host_clusters(host_id, user=None, session=None, **kwargs):
+ """get host clusters."""
+ host = _get_host(host_id, session=session)
+ return [clusterhost.cluster for clusterhost in host.clusterhosts]
+
+
+def check_host_validated(host):
+ """Check host is validated."""
+ if not host.config_validated:
+ raise exception.Forbidden(
+ 'host %s is not validated' % host.name
+ )
+
+
+def check_host_editable(
+ host, user=None,
+ check_in_installing=False
+):
+    """Check that a host is editable.
+
+    Set check_in_installing to True when setting reinstall_os or when
+    the host must not be in the installing state. Otherwise we check
+    that the host is neither installing nor installed. We also require
+    the user to be an admin or the owner of the host, so unauthorized
+    users cannot update host attributes.
+    """
+ if check_in_installing:
+ if host.state.state == 'INSTALLING':
+ raise exception.Forbidden(
+ 'host %s is not editable '
+ 'when state is in installing' % host.name
+ )
+ elif not host.reinstall_os:
+ raise exception.Forbidden(
+ 'host %s is not editable '
+ 'when not to be reinstalled' % host.name
+ )
+ if user and not user.is_admin and host.creator_id != user.id:
+ raise exception.Forbidden(
+ 'host %s is not editable '
+ 'when user is not admin or the owner of the host' % host.name
+ )
+
+
+def is_host_editable(
+ host, user=None,
+ check_in_installing=False
+):
+ """Get if host is editable."""
+ try:
+ check_host_editable(
+ host, user=user,
+ check_in_installing=check_in_installing
+ )
+ return True
+ except exception.Forbidden:
+ return False
+
+
+def validate_host(host):
+    """Validate a host.
+
+    Make sure the hostname is not empty, there is exactly one mgmt
+    network, and the mgmt network is not in promiscuous mode.
+    """
+    if not host.hostname:
+        raise exception.InvalidParameter(
+            'host %s does not set hostname' % host.name
+        )
+ if not host.host_networks:
+ raise exception.InvalidParameter(
+ 'host %s does not have any network' % host.name
+ )
+ mgmt_interface_set = False
+ for host_network in host.host_networks:
+ if host_network.is_mgmt:
+ if mgmt_interface_set:
+                raise exception.InvalidParameter(
+                    'host %s has multiple mgmt interfaces' % host.name
+                )
+ if host_network.is_promiscuous:
+ raise exception.InvalidParameter(
+ 'host %s interface %s is mgmt but promiscuous' % (
+ host.name, host_network.interface
+ )
+ )
+ mgmt_interface_set = True
+ if not mgmt_interface_set:
+ raise exception.InvalidParameter(
+ 'host %s has no mgmt interface' % host.name
+ )
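+
+# Example of host networks that pass validation (hypothetical data):
+# exactly one interface is mgmt, and the mgmt interface is not
+# promiscuous.
+#
+#     eth0: ip=10.0.0.5,   is_mgmt=True,  is_promiscuous=False
+#     eth1: ip=172.16.0.5, is_mgmt=False, is_promiscuous=True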
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+@utils.wrap_to_dict(RESP_FIELDS)
+def _update_host(host_id, session=None, user=None, **kwargs):
+ """Update a host internal."""
+ host = _get_host(host_id, session=session)
+ if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
+ logging.info("ignoring successful host: %s", host_id)
+ return {}
+ check_host_editable(
+ host, user=user,
+ check_in_installing=kwargs.get('reinstall_os', False)
+ )
+ return utils.update_db_object(session, host, **kwargs)
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_HOST
+)
+def update_host(host_id, user=None, session=None, **kwargs):
+ """Update a host."""
+ return _update_host(host_id, session=session, user=user, **kwargs)
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_HOST
+)
+def update_hosts(data=[], user=None, session=None):
+ """Update hosts."""
+ # TODO(xicheng): this batch function is not similar as others.
+ # try to make it similar output as others and batch update should
+ # tolerate partial failure.
+ hosts = []
+ for host_data in data:
+ hosts.append(_update_host(session=session, user=user, **host_data))
+ return hosts
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_HOST
+)
+@utils.wrap_to_dict(
+ RESP_FIELDS + ['status', 'host'],
+ host=RESP_FIELDS
+)
+def del_host(
+ host_id, force=False, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+    """Delete a host.
+
+    If force is set, we delete the host anyway.
+    If from_database_only is set, we only delete the host record in the
+    database. Otherwise we send a delete-host task to celery, which
+    removes the host record from the os installer and package installer,
+    cleans the installation logs, and finally cleans the database record.
+    The backend calls this function again with from_database_only set
+    after it deletes the record in the os installer and package
+    installer.
+    """
+ from compass.db.api import cluster as cluster_api
+ host = _get_host(host_id, session=session)
+    # Force the host state to ERROR when we want to delete the host
+    # anyway, even if it is installing or already installed. This lets
+    # the api know deletion is in progress while the backend performs
+    # the real delete. In the future we may introduce a new state such
+    # as INDELETE to indicate deletion is in progress.
+    # Whether the api should allow deleting an already-installed host
+    # still needs discussion.
+ if host.state.state != 'UNINITIALIZED' and force:
+ host.state.state = 'ERROR'
+ check_host_editable(
+ host, user=user,
+ check_in_installing=True
+ )
+ cluster_ids = []
+ for clusterhost in host.clusterhosts:
+ if clusterhost.state.state != 'UNINITIALIZED' and force:
+ clusterhost.state.state = 'ERROR'
+        # TODO(grace): here we check that every cluster using this host
+        # is editable, because the backend cannot delete a host without
+        # referencing its cluster. Once the backend supports deleting a
+        # pure host, change this to is_cluster_editable.
+        # So deleting a host may fail here even with the force flag set.
+ cluster_api.check_cluster_editable(
+ clusterhost.cluster, user=user,
+ check_in_installing=True
+ )
+ cluster_ids.append(clusterhost.cluster_id)
+
+ # Delete host record directly if there is no need to delete it
+ # in backend or from_database_only is set.
+ if host.state.state == 'UNINITIALIZED' or from_database_only:
+ return utils.del_db_object(session, host)
+ else:
+ logging.info(
+ 'send del host %s task to celery', host_id
+ )
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.delete_host',
+            (
+                user_email, host.id, cluster_ids
+            ),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'delete action sent',
+ 'host': host,
+ }
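+
+# Illustrative two-phase deletion (host id and user are hypothetical):
+# the first call enqueues the celery task; the backend later calls again
+# with from_database_only=True to remove the database record once the
+# installers are cleaned up.
+#
+#     del_host(42, force=True, user=admin_user)
+#     del_host(42, from_database_only=True, user=admin_user)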
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def get_host_config(host_id, user=None, session=None, **kwargs):
+ """Get host config."""
+ return _get_host(host_id, session=session)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
+def get_host_deployed_config(host_id, user=None, session=None, **kwargs):
+ """Get host deployed config."""
+ return _get_host(host_id, session=session)
+
+
+# Replace os_config with deployed_os_config in kwargs.
+@utils.replace_filters(
+ os_config='deployed_os_config'
+)
+@utils.supported_filters(
+ UPDATED_DEPLOYED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def update_host_deployed_config(host_id, user=None, session=None, **kwargs):
+ """Update host deployed config."""
+ host = _get_host(host_id, session=session)
+ check_host_editable(host, user=user)
+ check_host_validated(host)
+ return utils.update_db_object(session, host, **kwargs)
+
+
+def _host_os_config_validates(
+ config, host, session=None, user=None, **kwargs
+):
+ """Check host os config's validation."""
+ metadata_api.validate_os_config(
+ config, host.os_id
+ )
+
+
+@utils.input_validates_with_args(
+ put_os_config=_host_os_config_validates
+)
+@utils.output_validates_with_args(
+ os_config=_host_os_config_validates
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def _update_host_config(host, session=None, user=None, **kwargs):
+ """Update host config."""
+ check_host_editable(host, user=user)
+ return utils.update_db_object(session, host, **kwargs)
+
+
+# Replace os_config with put_os_config in kwargs.
+# It tells the db that os_config will be updated, not patched.
+@utils.replace_filters(
+ os_config='put_os_config'
+)
+@utils.supported_filters(
+ UPDATED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_CONFIG
+)
+def update_host_config(host_id, user=None, session=None, **kwargs):
+ """Update host config."""
+ host = _get_host(host_id, session=session)
+ return _update_host_config(
+ host, session=session, user=user, **kwargs
+ )
+
+
+# Replace os_config with patched_os_config in kwargs.
+# It tells the db that os_config will be patched, not updated.
+@utils.replace_filters(
+ os_config='patched_os_config'
+)
+@utils.supported_filters(
+ PATCHED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_CONFIG
+)
+def patch_host_config(host_id, user=None, session=None, **kwargs):
+ """Patch host config."""
+ host = _get_host(host_id, session=session)
+ return _update_host_config(
+ host, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_HOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def del_host_config(host_id, user=None, session=None):
+ """delete a host config."""
+ host = _get_host(host_id, session=session)
+ check_host_editable(host, user=user)
+ return utils.update_db_object(
+ session, host, os_config={}, config_validated=False
+ )
+
+
+@utils.supported_filters(
+    optional_support_keys=SUPPORTED_NETWORK_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_NETWORKS
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def list_host_networks(host_id, user=None, session=None, **filters):
+ """Get host networks for a host."""
+ host = _get_host(host_id, session=session)
+ return utils.list_db_objects(
+ session, models.HostNetwork,
+ host_id=host.id, **filters
+ )
+
+
+@utils.supported_filters(
+    optional_support_keys=SUPPORTED_NETWORK_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_NETWORKS
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def list_hostnetworks(user=None, session=None, **filters):
+ """Get host networks."""
+ return utils.list_db_objects(
+ session, models.HostNetwork, **filters
+ )
+
+
+def _get_hostnetwork(host_network_id, session=None, **kwargs):
+ """Get hostnetwork by hostnetwork id."""
+ if isinstance(host_network_id, (int, long)):
+ return utils.get_db_object(
+ session, models.HostNetwork,
+ id=host_network_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'host network id %s type is not int compatible' % host_network_id
+ )
+
+
+def _get_host_network(host_id, host_network_id, session=None, **kwargs):
+ """Get hostnetwork by host id and hostnetwork id."""
+ host = _get_host(host_id, session=session)
+ host_network = _get_hostnetwork(host_network_id, session=session, **kwargs)
+ if host_network.host_id != host.id:
+ raise exception.RecordNotExists(
+ 'host %s does not own host network %s' % (
+ host.id, host_network.id
+ )
+ )
+ return host_network
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_NETWORKS
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def get_host_network(
+ host_id, host_network_id,
+ user=None, session=None, **kwargs
+):
+ """Get host network."""
+ return _get_host_network(
+ host_id, host_network_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_NETWORKS
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def get_hostnetwork(host_network_id, user=None, session=None, **kwargs):
+ """Get host network."""
+ return _get_hostnetwork(host_network_id, session=session)
+
+
+@utils.supported_filters(
+ ADDED_NETWORK_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_NETWORK_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ ip=utils.check_ip
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def _add_host_network(
+ host_id, exception_when_existing=True,
+ session=None, user=None, interface=None, ip=None, **kwargs
+):
+ """Add hostnetwork to a host."""
+ host = _get_host(host_id, session=session)
+ check_host_editable(host, user=user)
+ user_id = user.id
+ return utils.add_db_object(
+ session, models.HostNetwork,
+ exception_when_existing,
+ host.id, interface, user_id, ip=ip, **kwargs
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_NETWORK
+)
+def add_host_network(
+ host_id, exception_when_existing=True,
+ interface=None, user=None, session=None, **kwargs
+):
+ """Create a hostnetwork to a host."""
+ return _add_host_network(
+ host_id,
+ exception_when_existing,
+ interface=interface, session=session, user=user, **kwargs
+ )
+
+
+def _get_hostnetwork_by_ip(
+ ip, session=None, **kwargs
+):
+ ip_int = long(netaddr.IPAddress(ip))
+ return utils.get_db_object(
+ session, models.HostNetwork,
+ ip_int=ip_int, **kwargs
+ )
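+
+# HostNetwork stores each address as an integer, so the lookup above is a
+# plain integer comparison; e.g. long(netaddr.IPAddress('10.0.0.5'))
+# evaluates to 167772165.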
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_NETWORK
+)
+def add_host_networks(
+ exception_when_existing=False,
+ data=[], user=None, session=None
+):
+ """Create host networks."""
+ hosts = []
+ failed_hosts = []
+ for host_data in data:
+ host_id = host_data['host_id']
+ host = _get_host(host_id, session=session)
+ networks = host_data['networks']
+ host_networks = []
+ failed_host_networks = []
+ for network in networks:
+ host_network = _get_hostnetwork_by_ip(
+ network['ip'], session=session,
+ exception_when_missing=False
+ )
+ if (
+ host_network and not (
+ host_network.host_id == host.id and
+ host_network.interface == network['interface']
+ )
+ ):
+ logging.error('ip %s exists in host network %s' % (
+ network['ip'], host_network.id
+ ))
+ failed_host_networks.append(network)
+ else:
+ host_networks.append(_add_host_network(
+ host.id, exception_when_existing,
+ session=session, user=user, **network
+ ))
+ if host_networks:
+ hosts.append({'host_id': host.id, 'networks': host_networks})
+ if failed_host_networks:
+ failed_hosts.append({
+ 'host_id': host.id, 'networks': failed_host_networks
+ })
+ return {
+ 'hosts': hosts,
+ 'failed_hosts': failed_hosts
+ }
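+
+# Illustrative bulk payload (values hypothetical): a network whose ip is
+# already claimed by a different host or interface is reported under
+# failed_hosts instead of raising.
+#
+#     add_host_networks(data=[{
+#         'host_id': 1,
+#         'networks': [{
+#             'interface': 'eth0', 'ip': '10.0.0.5',
+#             'subnet_id': 1, 'is_mgmt': True
+#         }]
+#     }], user=admin_user)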
+
+
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def _update_host_network(
+ host_network, session=None, user=None, **kwargs
+):
+ """Update host network."""
+ check_host_editable(host_network.host, user=user)
+ return utils.update_db_object(session, host_network, **kwargs)
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_NETWORK_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ ip=utils.check_ip
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_NETWORK
+)
+def update_host_network(
+ host_id, host_network_id, user=None, session=None, **kwargs
+):
+ """Update a host network by host id and host network id."""
+ host = _get_host(
+ host_id, session=session
+ )
+ if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
+        logging.info("ignoring update request for successfully deployed host")
+ return {}
+
+ host_network = _get_host_network(
+ host_id, host_network_id, session=session
+ )
+ return _update_host_network(
+ host_network, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_NETWORK_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ ip=utils.check_ip
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_NETWORK
+)
+def update_hostnetwork(host_network_id, user=None, session=None, **kwargs):
+ """Update a host network by host network id."""
+ host_network = _get_hostnetwork(
+ host_network_id, session=session
+ )
+ return _update_host_network(
+ host_network, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_HOST_NETWORK
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def del_host_network(
+ host_id, host_network_id, user=None,
+ session=None, **kwargs
+):
+ """Delete a host network by host id and host network id."""
+ host_network = _get_host_network(
+ host_id, host_network_id, session=session
+ )
+ check_host_editable(host_network.host, user=user)
+ return utils.del_db_object(session, host_network)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_HOST_NETWORK
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def del_hostnetwork(host_network_id, user=None, session=None, **kwargs):
+ """Delete a host network by host network id."""
+ host_network = _get_hostnetwork(
+ host_network_id, session=session
+ )
+ check_host_editable(host_network.host, user=user)
+ return utils.del_db_object(session, host_network)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_HOST_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def get_host_state(host_id, user=None, session=None, **kwargs):
+ """Get host state info."""
+ return _get_host(host_id, session=session).state
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_STATE_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_HOST_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def update_host_state(host_id, user=None, session=None, **kwargs):
+ """Update a host state."""
+ host = _get_host(host_id, session=session)
+ utils.update_db_object(session, host.state, **kwargs)
+ return host.state
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_HOST_STATE
+)
+@utils.wrap_to_dict(['status', 'host'])
+def update_host_state_internal(
+ host_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a host state.
+
+ This function is called when host os is installed.
+    If from_database_only, the state is only updated in the database.
+    Otherwise a celery task is sent to the os installer and package
+    installer to perform follow-up actions.
+ """
+ # TODO(xicheng): should be merged into update_host_state
+ host = _get_host(host_id, session=session)
+    logging.info('host %s state: %s', host_id, host.state)
+ if 'ready' in kwargs and kwargs['ready'] and not host.state.ready:
+ ready_triggered = True
+ else:
+ ready_triggered = False
+ clusterhosts_ready = {}
+ clusters_os_ready = {}
+ if ready_triggered:
+ for clusterhost in host.clusterhosts:
+ cluster = clusterhost.cluster
+ if cluster.flavor_name:
+ clusterhosts_ready[cluster.id] = False
+ else:
+ clusterhosts_ready[cluster.id] = True
+ all_os_ready = True
+ for clusterhost_in_cluster in cluster.clusterhosts:
+ host_in_cluster = clusterhost_in_cluster.host
+ if host_in_cluster.id == host.id:
+ continue
+ if not host_in_cluster.state.ready:
+ all_os_ready = False
+ clusters_os_ready[cluster.id] = all_os_ready
+ logging.debug('host %s ready: %s', host_id, ready_triggered)
+ logging.debug("clusterhosts_ready is: %s", clusterhosts_ready)
+ logging.debug("clusters_os_ready is %s", clusters_os_ready)
+
+ if not ready_triggered or from_database_only:
+ logging.debug('%s state is set to %s', host.name, kwargs)
+ utils.update_db_object(session, host.state, **kwargs)
+ if not host.state.ready:
+ for clusterhost in host.clusterhosts:
+ utils.update_db_object(
+ session, clusterhost.state, ready=False
+ )
+ utils.update_db_object(
+ session, clusterhost.cluster.state, ready=False
+ )
+ status = '%s state is updated' % host.name
+ else:
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.os_installed',
+ (
+ host.id, clusterhosts_ready,
+ clusters_os_ready
+ ),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ status = '%s: clusterhosts ready %s clusters os ready %s' % (
+ host.name, clusterhosts_ready, clusters_os_ready
+ )
+ logging.info('action status: %s', status)
+ return {
+ 'status': status,
+ 'host': host.state
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_LOG_FIELDS)
+def get_host_log_histories(host_id, user=None, session=None, **kwargs):
+ """Get host log history."""
+ host = _get_host(host_id, session=session)
+ return utils.list_db_objects(
+ session, models.HostLogHistory, id=host.id, **kwargs
+ )
+
+
+def _get_host_log_history(host_id, filename, session=None, **kwargs):
+ host = _get_host(host_id, session=session)
+ return utils.get_db_object(
+ session, models.HostLogHistory, id=host.id,
+ filename=filename, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_LOG_FIELDS)
+def get_host_log_history(host_id, filename, user=None, session=None, **kwargs):
+ """Get host log history."""
+ return _get_host_log_history(
+ host_id, filename, session=session
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_LOG_FIELDS)
+def update_host_log_history(
+ host_id, filename, user=None,
+ session=None, **kwargs
+):
+ """Update a host log history."""
+ host_log_history = _get_host_log_history(
+ host_id, filename, session=session
+ )
+ return utils.update_db_object(session, host_log_history, **kwargs)
+
+
+@utils.supported_filters(
+ ADDED_LOG_FIELDS,
+ optional_support_keys=UPDATED_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_LOG_FIELDS)
+def add_host_log_history(
+ host_id, exception_when_existing=False,
+ filename=None, user=None, session=None, **kwargs
+):
+ """add a host log history."""
+ host = _get_host(host_id, session=session)
+ return utils.add_db_object(
+ session, models.HostLogHistory, exception_when_existing,
+ host.id, filename, **kwargs
+ )
+
+
+@utils.supported_filters(optional_support_keys=['poweron'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ host=RESP_CONFIG_FIELDS
+)
+def poweron_host(
+ host_id, poweron={}, user=None, session=None, **kwargs
+):
+ """power on host."""
+ from compass.tasks import client as celery_client
+ host = _get_host(host_id, session=session)
+ check_host_validated(host)
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.poweron_host',
+ (host.id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'poweron %s action sent' % host.name,
+ 'host': host
+ }
+
+
+@utils.supported_filters(optional_support_keys=['poweroff'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ host=RESP_CONFIG_FIELDS
+)
+def poweroff_host(
+ host_id, poweroff={}, user=None, session=None, **kwargs
+):
+ """power off host."""
+ from compass.tasks import client as celery_client
+ host = _get_host(host_id, session=session)
+ check_host_validated(host)
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.poweroff_host',
+ (host.id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'poweroff %s action sent' % host.name,
+ 'host': host
+ }
+
+
+@utils.supported_filters(optional_support_keys=['reset'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ host=RESP_CONFIG_FIELDS
+)
+def reset_host(
+ host_id, reset={}, user=None, session=None, **kwargs
+):
+ """reset host."""
+ from compass.tasks import client as celery_client
+ host = _get_host(host_id, session=session)
+ check_host_validated(host)
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.reset_host',
+ (host.id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'reset %s action sent' % host.name,
+ 'host': host
+ }
diff --git a/compass-deck/db/api/machine.py b/compass-deck/db/api/machine.py
new file mode 100644
index 0000000..b7b16b2
--- /dev/null
+++ b/compass-deck/db/api/machine.py
@@ -0,0 +1,317 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Switch database operations."""
+import logging
+import re
+
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+MACHINE_PRIMARY_FIELDS = ['mac', 'owner_id']
+SUPPORTED_FIELDS = [
+ 'mac', 'tag', 'location',
+ 'machine_attributes', 'owner_id']
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+UPDATED_FIELDS = [
+ 'ipmi_credentials', 'machine_attributes',
+ 'tag', 'location']
+PATCHED_FIELDS = [
+ 'patched_ipmi_credentials', 'patched_tag',
+ 'patched_location'
+]
+RESP_FIELDS = [
+ 'id', 'mac', 'ipmi_credentials', 'switches', 'switch_ip',
+ 'port', 'vlans', 'machine_attributes', 'owner_id',
+ 'tag', 'location', 'created_at', 'updated_at'
+]
+RESP_DEPLOY_FIELDS = [
+ 'status', 'machine'
+]
+
+
+def _get_machine(machine_id, session=None, **kwargs):
+ """Get machine by id."""
+ if isinstance(machine_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Machine,
+ id=machine_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'machine id %s type is not int compatible' % machine_id
+ )
+
+
+@utils.supported_filters(
+    MACHINE_PRIMARY_FIELDS,
+ optional_support_keys=SUPPORTED_FIELDS
+)
+@utils.input_validates(mac=utils.check_mac)
+def _add_machine(mac, owner_id=None, session=None, **kwargs):
+ """Add a machine."""
+ if isinstance(owner_id, (int, long)):
+ return utils.add_db_object(
+ session, models.Machine,
+ True,
+ mac,
+ owner_id=owner_id,
+ **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'owner id %s type is not int compatible' % owner_id
+ )
+
+
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_machine(
+ mac, owner_id=None, session=None, user=None, **kwargs
+):
+ """Add a machine."""
+ return _add_machine(
+ mac,
+ owner_id=owner_id,
+ session=session, **kwargs
+ )
+
+
+def get_machine_internal(machine_id, session=None, **kwargs):
+ """Helper function to other files under db/api."""
+ return _get_machine(machine_id, session=session, **kwargs)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_MACHINES
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_machine(
+ machine_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a machine."""
+ return _get_machine(
+ machine_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_MACHINES
+)
+@utils.output_filters(
+ tag=utils.general_filter_callback,
+ location=utils.general_filter_callback
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_machines(user=None, session=None, **filters):
+ """List machines."""
+ machines = utils.list_db_objects(
+ session, models.Machine, **filters
+ )
+    if not user.is_admin and machines:
+ machines = [m for m in machines if m.owner_id == user.id]
+ return machines
+
+
+@utils.wrap_to_dict(RESP_FIELDS)
+def _update_machine(machine_id, session=None, **kwargs):
+ """Update a machine."""
+ machine = _get_machine(machine_id, session=session)
+ return utils.update_db_object(session, machine, **kwargs)
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(ipmi_credentials=utils.check_ipmi_credentials)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_MACHINE
+)
+def update_machine(machine_id, user=None, session=None, **kwargs):
+ """Update a machine."""
+ return _update_machine(
+ machine_id, session=session, **kwargs
+ )
+
+
+# Replace [ipmi_credentials, tag, location] with
+# [patched_ipmi_credentials, patched_tag, patched_location]
+# in kwargs. It tells the db that these fields will be patched.
+@utils.replace_filters(
+ ipmi_credentials='patched_ipmi_credentials',
+ tag='patched_tag',
+ location='patched_location'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.output_validates(ipmi_credentials=utils.check_ipmi_credentials)
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_MACHINE
+)
+def patch_machine(machine_id, user=None, session=None, **kwargs):
+ """Patch a machine."""
+ return _update_machine(
+ machine_id, session=session, **kwargs
+ )
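+
+# Illustrative update-vs-patch semantics (values hypothetical): update
+# replaces a field wholesale, while patch routes through the patched_*
+# keys, which the model merges into the existing value.
+#
+#     update_machine(3, tag={'rack': 'r1'}, user=admin_user)
+#     patch_machine(3, tag={'unit': 'u7'}, user=admin_user)
+#     # tag would then combine both keys, e.g. {'rack': 'r1', 'unit': 'u7'}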
+
+
+def _check_machine_deletable(machine):
+ """Check a machine deletable."""
+ if machine.host:
+ host = machine.host
+ raise exception.NotAcceptable(
+ 'machine %s has host %s on it' % (
+ machine.mac, host.name
+ )
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_MACHINE
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_machine(machine_id, user=None, session=None, **kwargs):
+ """Delete a machine."""
+ machine = _get_machine(machine_id, session=session)
+ _check_machine_deletable(machine)
+ return utils.del_db_object(session, machine)
+
+
+@utils.supported_filters(optional_support_keys=['poweron'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ machine=RESP_FIELDS
+)
+def poweron_machine(
+ machine_id, poweron={}, user=None, session=None, **kwargs
+):
+ """power on machine."""
+ from compass.tasks import client as celery_client
+ machine = _get_machine(
+ machine_id, session=session
+ )
+ if not user:
+ user_id = machine.owner_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.poweron_machine',
+ (machine_id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'poweron %s action sent' % machine.mac,
+ 'machine': machine
+ }
+
+
+@utils.supported_filters(optional_support_keys=['poweroff'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ machine=RESP_FIELDS
+)
+def poweroff_machine(
+ machine_id, poweroff={}, user=None, session=None, **kwargs
+):
+ """power off machine."""
+ from compass.tasks import client as celery_client
+ machine = _get_machine(
+ machine_id, session=session
+ )
+ if not user:
+ user_id = machine.owner_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.poweroff_machine',
+ (machine_id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'poweroff %s action sent' % machine.mac,
+ 'machine': machine
+ }
+
+
+@utils.supported_filters(optional_support_keys=['reset'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ machine=RESP_FIELDS
+)
+def reset_machine(
+ machine_id, reset={}, user=None, session=None, **kwargs
+):
+ """reset machine."""
+ from compass.tasks import client as celery_client
+ machine = _get_machine(
+ machine_id, session=session
+ )
+ if not user:
+ user_id = machine.owner_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.reset_machine',
+ (machine_id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'reset %s action sent' % machine.mac,
+ 'machine': machine
+ }
diff --git a/compass-deck/db/api/metadata.py b/compass-deck/db/api/metadata.py
new file mode 100644
index 0000000..16310c8
--- /dev/null
+++ b/compass-deck/db/api/metadata.py
@@ -0,0 +1,517 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metadata related database operations."""
+import copy
+import logging
+import string
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import callback as metadata_callback
+from compass.db import exception
+from compass.db import models
+from compass.db import validator as metadata_validator
+
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+OS_FIELDS = None
+PACKAGE_FIELDS = None
+FLAVOR_FIELDS = None
+OSES_METADATA = None
+PACKAGES_METADATA = None
+FLAVORS_METADATA = None
+OSES_METADATA_UI_CONVERTERS = None
+FLAVORS_METADATA_UI_CONVERTERS = None
+
+
+def _get_field_from_configuration(configs):
+ """Get fields from configurations."""
+ fields = {}
+ for config in configs:
+ if not isinstance(config, dict):
+ raise exception.InvalidParameter(
+ 'config %s is not dict' % config
+ )
+ field_name = config['NAME']
+ fields[field_name] = {
+ 'name': field_name,
+ 'id': field_name,
+ 'field_type': config.get('FIELD_TYPE', basestring),
+ 'display_type': config.get('DISPLAY_TYPE', 'text'),
+ 'validator': config.get('VALIDATOR', None),
+ 'js_validator': config.get('JS_VALIDATOR', None),
+ 'description': config.get('DESCRIPTION', field_name)
+ }
+ return fields
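+
+# Each loaded field config contributes one dict to `configs`; a
+# hypothetical os field config file could look like the following (the
+# names and validator are illustrative, not from this repo):
+#
+#     NAME = 'gateway'
+#     FIELD_TYPE = basestring
+#     DISPLAY_TYPE = 'text'
+#     VALIDATOR = is_valid_gateway
+#     DESCRIPTION = 'default gateway'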
+
+
+def _get_os_fields_from_configuration():
+ """Get os fields from os field config dir."""
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.OS_FIELD_DIR,
+ env_locals=env_locals
+ )
+ return _get_field_from_configuration(
+ configs
+ )
+
+
+def _get_package_fields_from_configuration():
+ """Get package fields from package field config dir."""
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.PACKAGE_FIELD_DIR,
+ env_locals=env_locals
+ )
+ return _get_field_from_configuration(
+ configs
+ )
+
+
+def _get_flavor_fields_from_configuration():
+ """Get flavor fields from flavor field config dir."""
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.FLAVOR_FIELD_DIR,
+ env_locals=env_locals
+ )
+ return _get_field_from_configuration(
+ configs
+ )
+
+
+def _get_metadata_from_configuration(
+ path, name, config,
+ fields, **kwargs
+):
+ """Recursively get metadata from configuration.
+
+ Args:
+        path: the path from the root element to this config section,
+            mainly for troubleshooting.
+ name: the key of the metadata section.
+ config: the value of the metadata section.
+ fields: all fields defined in os fields or package fields dir.
+ """
+ if not isinstance(config, dict):
+ raise exception.InvalidParameter(
+ '%s config %s is not dict' % (path, config)
+ )
+ metadata_self = config.get('_self', {})
+ if 'field' in metadata_self:
+ field_name = metadata_self['field']
+ field = fields[field_name]
+ else:
+ field = {}
+    # mapping_to may contain $-placeholders such as $partition. Here we
+    # replace $partition with the key of the corresponding config. The
+    # backend can use this feature to support multiple partitions while
+    # declaring the partition metadata only once.
+ mapping_to_template = metadata_self.get('mapping_to', None)
+ if mapping_to_template:
+ mapping_to = string.Template(
+ mapping_to_template
+ ).safe_substitute(
+ **kwargs
+ )
+ else:
+ mapping_to = None
+ self_metadata = {
+ 'name': name,
+ 'display_name': metadata_self.get('display_name', name),
+ 'field_type': field.get('field_type', dict),
+ 'display_type': field.get('display_type', None),
+ 'description': metadata_self.get(
+ 'description', field.get('description', None)
+ ),
+ 'is_required': metadata_self.get('is_required', False),
+ 'required_in_whole_config': metadata_self.get(
+ 'required_in_whole_config', False),
+ 'mapping_to': mapping_to,
+ 'validator': metadata_self.get(
+ 'validator', field.get('validator', None)
+ ),
+ 'js_validator': metadata_self.get(
+ 'js_validator', field.get('js_validator', None)
+ ),
+ 'default_value': metadata_self.get('default_value', None),
+ 'default_callback': metadata_self.get('default_callback', None),
+ 'default_callback_params': metadata_self.get(
+ 'default_callback_params', {}),
+ 'options': metadata_self.get('options', None),
+ 'options_callback': metadata_self.get('options_callback', None),
+ 'options_callback_params': metadata_self.get(
+ 'options_callback_params', {}),
+ 'autofill_callback': metadata_self.get(
+ 'autofill_callback', None),
+ 'autofill_callback_params': metadata_self.get(
+ 'autofill_callback_params', {}),
+ 'required_in_options': metadata_self.get(
+ 'required_in_options', False)
+ }
+ self_metadata.update(kwargs)
+ metadata = {'_self': self_metadata}
+    # Key extensions are used for two things:
+    # one is to return extended metadata in which $<something> is
+    # replaced by its possible extensions. The other is to record the
+    # value each $<something> extends to, for use in later mapping_to
+    # substitution.
+    # TODO(grace): pick a better name than key_extensions if you can
+    # think of one.
+    # Suppose key_extensions is {'$partition': ['/var', '/']}. The
+    # metadata for $partition is then mapped to {'/var': ..., '/': ...},
+    # and kwargs={'partition': '/var'} and kwargs={'partition': '/'} are
+    # passed to the recursive parsing of the sub metadata under '/var'
+    # and '/'. In that recursion, the kwargs are used to substitute
+    # mapping_to.
+ key_extensions = metadata_self.get('key_extensions', {})
+ general_keys = []
+ for key, value in config.items():
+ if key.startswith('_'):
+ continue
+ if key in key_extensions:
+ if not key.startswith('$'):
+ raise exception.InvalidParameter(
+ '%s subkey %s should start with $' % (
+ path, key
+ )
+ )
+ extended_keys = key_extensions[key]
+ for extended_key in extended_keys:
+ if extended_key.startswith('$'):
+ raise exception.InvalidParameter(
+ '%s extended key %s should not start with $' % (
+ path, extended_key
+ )
+ )
+ sub_kwargs = dict(kwargs)
+ sub_kwargs[key[1:]] = extended_key
+ metadata[extended_key] = _get_metadata_from_configuration(
+ '%s/%s' % (path, extended_key), extended_key, value,
+ fields, **sub_kwargs
+ )
+ else:
+ if key.startswith('$'):
+ general_keys.append(key)
+ metadata[key] = _get_metadata_from_configuration(
+ '%s/%s' % (path, key), key, value,
+ fields, **kwargs
+ )
+ if len(general_keys) > 1:
+ raise exception.InvalidParameter(
+ 'found multiple general keys in %s: %s' % (
+ path, general_keys
+ )
+ )
+ return metadata
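+ # A minimal sketch of the transformation above, with hypothetical
+ # field and config names (none come from a real config file):
+ #     fields = {'general': {'field_type': basestring}}
+ #     config = {
+ #         '_self': {'field': 'general', 'is_required': True},
+ #         'dns': {'_self': {'field': 'general'}}
+ #     }
+ #     metadata = _get_metadata_from_configuration(
+ #         'os_config', 'os_config', config, fields
+ #     )
+ #     # metadata['_self']['is_required'] is True
+ #     # metadata['dns']['_self']['field_type'] is basestring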
+
+
+def _get_oses_metadata_from_configuration():
+ """Get os metadata from os metadata config dir."""
+ oses_metadata = {}
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.OS_METADATA_DIR,
+ env_locals=env_locals
+ )
+ for config in configs:
+ os_name = config['OS']
+ os_metadata = oses_metadata.setdefault(os_name, {})
+ for key, value in config['METADATA'].items():
+ os_metadata[key] = _get_metadata_from_configuration(
+ key, key, value, OS_FIELDS
+ )
+
+ oses = adapter_api.OSES
+ parents = {}
+ for os_name, os in oses.items():
+ parent = os.get('parent', None)
+ parents[os_name] = parent
+ for os_name, os in oses.items():
+ oses_metadata[os_name] = util.recursive_merge_dict(
+ os_name, oses_metadata, parents
+ )
+ return oses_metadata
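+ # Sketch of the parent merge above, assuming a hypothetical os tree
+ # where 'ubuntu-14.04' declares 'general' as its parent:
+ #     parents = {'general': None, 'ubuntu-14.04': 'general'}
+ #     oses_metadata['ubuntu-14.04'] = util.recursive_merge_dict(
+ #         'ubuntu-14.04', oses_metadata, parents
+ #     )
+ # so a child os only declares the metadata it adds or overrides.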
+
+
+def _get_packages_metadata_from_configuration():
+ """Get package metadata from package metadata config dir."""
+ packages_metadata = {}
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.PACKAGE_METADATA_DIR,
+ env_locals=env_locals
+ )
+ for config in configs:
+ adapter_name = config['ADAPTER']
+ package_metadata = packages_metadata.setdefault(adapter_name, {})
+ for key, value in config['METADATA'].items():
+ package_metadata[key] = _get_metadata_from_configuration(
+ key, key, value, PACKAGE_FIELDS
+ )
+ adapters = adapter_api.ADAPTERS
+ parents = {}
+ for adapter_name, adapter in adapters.items():
+ parent = adapter.get('parent', None)
+ parents[adapter_name] = parent
+ for adapter_name, adapter in adapters.items():
+ packages_metadata[adapter_name] = util.recursive_merge_dict(
+ adapter_name, packages_metadata, parents
+ )
+ return packages_metadata
+
+
+def _get_flavors_metadata_from_configuration():
+ """Get flavor metadata from flavor metadata config dir."""
+ flavors_metadata = {}
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.FLAVOR_METADATA_DIR,
+ env_locals=env_locals
+ )
+ for config in configs:
+ adapter_name = config['ADAPTER']
+ flavor_name = config['FLAVOR']
+ flavor_metadata = flavors_metadata.setdefault(
+ adapter_name, {}
+ ).setdefault(flavor_name, {})
+ for key, value in config['METADATA'].items():
+ flavor_metadata[key] = _get_metadata_from_configuration(
+ key, key, value, FLAVOR_FIELDS
+ )
+
+ packages_metadata = PACKAGES_METADATA
+ adapters_flavors = adapter_api.ADAPTERS_FLAVORS
+ for adapter_name, adapter_flavors in adapters_flavors.items():
+ package_metadata = packages_metadata.get(adapter_name, {})
+ for flavor_name, flavor in adapter_flavors.items():
+ flavor_metadata = flavors_metadata.setdefault(
+ adapter_name, {}
+ ).setdefault(flavor_name, {})
+ util.merge_dict(flavor_metadata, package_metadata, override=False)
+ return flavors_metadata
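+ # Sketch of the merge semantics assumed above: with override=False,
+ # util.merge_dict keeps the flavor's own keys and only copies in
+ # package-level keys the flavor does not define, e.g. (hypothetically)
+ #     flavor_metadata = {'a': 1}
+ #     util.merge_dict(flavor_metadata, {'a': 2, 'b': 3}, override=False)
+ #     # flavor_metadata -> {'a': 1, 'b': 3}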
+
+
+def _filter_metadata(metadata, **kwargs):
+ if not isinstance(metadata, dict):
+ return metadata
+ filtered_metadata = {}
+ for key, value in metadata.items():
+ if key == '_self':
+ default_value = value.get('default_value', None)
+ if default_value is None:
+ default_callback_params = value.get(
+ 'default_callback_params', {}
+ )
+ callback_params = dict(kwargs)
+ if default_callback_params:
+ callback_params.update(default_callback_params)
+ default_callback = value.get('default_callback', None)
+ if default_callback:
+ default_value = default_callback(key, **callback_params)
+ options = value.get('options', None)
+ if options is None:
+ options_callback_params = value.get(
+ 'options_callback_params', {}
+ )
+ callback_params = dict(kwargs)
+ if options_callback_params:
+ callback_params.update(options_callback_params)
+
+ options_callback = value.get('options_callback', None)
+ if options_callback:
+ options = options_callback(key, **callback_params)
+ filtered_metadata[key] = value
+ if default_value is not None:
+ filtered_metadata[key]['default_value'] = default_value
+ if options is not None:
+ filtered_metadata[key]['options'] = options
+ else:
+ filtered_metadata[key] = _filter_metadata(value, **kwargs)
+ return filtered_metadata
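+ # Sketch of how the callbacks above compute values, with a hypothetical
+ # '_self' entry (callback names are illustrative only):
+ #     '_self': {
+ #         'default_value': None,
+ #         'default_callback': default_proxy_callback,
+ #         'options': None,
+ #         'options_callback': proxy_options_callback
+ #     }
+ # Since default_value and options are None, the callbacks are invoked
+ # and their results are written back into '_self', so the returned
+ # metadata carries concrete 'default_value' and 'options'.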
+
+
+def _load_metadata(force_reload=False):
+ """Load metadata information into memory.
+
+ If force_reload, the metadata information will be reloaded
+ even if the metadata is already loaded.
+ """
+ adapter_api.load_adapters_internal(force_reload=force_reload)
+ global OS_FIELDS
+ if force_reload or OS_FIELDS is None:
+ OS_FIELDS = _get_os_fields_from_configuration()
+ global PACKAGE_FIELDS
+ if force_reload or PACKAGE_FIELDS is None:
+ PACKAGE_FIELDS = _get_package_fields_from_configuration()
+ global FLAVOR_FIELDS
+ if force_reload or FLAVOR_FIELDS is None:
+ FLAVOR_FIELDS = _get_flavor_fields_from_configuration()
+ global OSES_METADATA
+ if force_reload or OSES_METADATA is None:
+ OSES_METADATA = _get_oses_metadata_from_configuration()
+ global PACKAGES_METADATA
+ if force_reload or PACKAGES_METADATA is None:
+ PACKAGES_METADATA = _get_packages_metadata_from_configuration()
+ global FLAVORS_METADATA
+ if force_reload or FLAVORS_METADATA is None:
+ FLAVORS_METADATA = _get_flavors_metadata_from_configuration()
+ global OSES_METADATA_UI_CONVERTERS
+ if force_reload or OSES_METADATA_UI_CONVERTERS is None:
+ OSES_METADATA_UI_CONVERTERS = (
+ _get_oses_metadata_ui_converters_from_configuration()
+ )
+ global FLAVORS_METADATA_UI_CONVERTERS
+ if force_reload or FLAVORS_METADATA_UI_CONVERTERS is None:
+ FLAVORS_METADATA_UI_CONVERTERS = (
+ _get_flavors_metadata_ui_converters_from_configuration()
+ )
+
+
+def _get_oses_metadata_ui_converters_from_configuration():
+ """Get os metadata ui converters from os metadata mapping config dir.
+
+ An os metadata ui converter converts os metadata into a format
+ the UI can understand and display.
+ """
+ oses_metadata_ui_converters = {}
+ configs = util.load_configs(setting.OS_MAPPING_DIR)
+ for config in configs:
+ os_name = config['OS']
+ oses_metadata_ui_converters[os_name] = config.get('CONFIG_MAPPING', {})
+
+ oses = adapter_api.OSES
+ parents = {}
+ for os_name, os in oses.items():
+ parent = os.get('parent', None)
+ parents[os_name] = parent
+ for os_name, os in oses.items():
+ oses_metadata_ui_converters[os_name] = util.recursive_merge_dict(
+ os_name, oses_metadata_ui_converters, parents
+ )
+ return oses_metadata_ui_converters
+
+
+def _get_flavors_metadata_ui_converters_from_configuration():
+ """Get flavor metadata ui converters from flavor mapping config dir."""
+ flavors_metadata_ui_converters = {}
+ configs = util.load_configs(setting.FLAVOR_MAPPING_DIR)
+ for config in configs:
+ adapter_name = config['ADAPTER']
+ flavor_name = config['FLAVOR']
+ flavors_metadata_ui_converters.setdefault(
+ adapter_name, {}
+ )[flavor_name] = config.get('CONFIG_MAPPING', {})
+ adapters = adapter_api.ADAPTERS
+ parents = {}
+ for adapter_name, adapter in adapters.items():
+ parent = adapter.get('parent', None)
+ parents[adapter_name] = parent
+ for adapter_name, adapter in adapters.items():
+ flavors_metadata_ui_converters[adapter_name] = (
+ util.recursive_merge_dict(
+ adapter_name, flavors_metadata_ui_converters, parents
+ )
+ )
+ return flavors_metadata_ui_converters
+
+
+def get_packages_metadata_internal(force_reload=False):
+ """Get deployable package metadata."""
+ _load_metadata(force_reload=force_reload)
+ metadata_mapping = {}
+ adapters = adapter_api.ADAPTERS
+ for adapter_name, adapter in adapters.items():
+ if adapter.get('deployable'):
+ metadata_mapping[adapter_name] = _filter_metadata(
+ PACKAGES_METADATA.get(adapter_name, {})
+ )
+ else:
+ logging.info(
+ 'ignore metadata since its adapter %s is not deployable',
+ adapter_name
+ )
+ return metadata_mapping
+
+
+def get_flavors_metadata_internal(force_reload=False):
+ """Get deployable flavor metadata."""
+ _load_metadata(force_reload=force_reload)
+ metadata_mapping = {}
+ adapters_flavors = adapter_api.ADAPTERS_FLAVORS
+ for adapter_name, adapter_flavors in adapters_flavors.items():
+ adapter = adapter_api.ADAPTERS[adapter_name]
+ if not adapter.get('deployable'):
+ logging.info(
+ 'ignore metadata since its adapter %s is not deployable',
+ adapter_name
+ )
+ continue
+ for flavor_name, flavor in adapter_flavors.items():
+ flavor_metadata = FLAVORS_METADATA.get(
+ adapter_name, {}
+ ).get(flavor_name, {})
+ metadata = _filter_metadata(flavor_metadata)
+ metadata_mapping.setdefault(
+ adapter_name, {}
+ )[flavor_name] = metadata
+ return metadata_mapping
+
+
+def get_flavors_metadata_ui_converters_internal(force_reload=False):
+ """Get usable flavor metadata ui converters."""
+ _load_metadata(force_reload=force_reload)
+ return FLAVORS_METADATA_UI_CONVERTERS
+
+
+def get_oses_metadata_internal(force_reload=False):
+ """Get deployable os metadata."""
+ _load_metadata(force_reload=force_reload)
+ metadata_mapping = {}
+ oses = adapter_api.OSES
+ for os_name, os in oses.items():
+ if os.get('deployable'):
+ metadata_mapping[os_name] = _filter_metadata(
+ OSES_METADATA.get(os_name, {})
+ )
+ else:
+ logging.info(
+ 'ignore metadata since its os %s is not deployable',
+ os_name
+ )
+ return metadata_mapping
+
+
+def get_oses_metadata_ui_converters_internal(force_reload=False):
+ """Get usable os metadata ui converters."""
+ _load_metadata(force_reload=force_reload)
+ return OSES_METADATA_UI_CONVERTERS
diff --git a/compass-deck/db/api/metadata_holder.py b/compass-deck/db/api/metadata_holder.py
new file mode 100644
index 0000000..24afc67
--- /dev/null
+++ b/compass-deck/db/api/metadata_holder.py
@@ -0,0 +1,731 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metadata related object holder."""
+import logging
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import adapter_holder as adapter_holder_api
+from compass.db.api import database
+from compass.db.api import metadata as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+RESP_METADATA_FIELDS = [
+ 'os_config', 'package_config'
+]
+RESP_UI_METADATA_FIELDS = [
+ 'os_global_config', 'flavor_config'
+]
+
+
+def load_metadatas(force_reload=False):
+ """Load metadatas."""
+ # TODO(xicheng): today metadata.py loads metadata into memory in its
+ # original file format. Here we take that in-memory metadata, do some
+ # translation, and store the translated metadata into memory as well
+ # in metadata_holder.py. The api layer may only access the global
+ # in-memory data in metadata_holder.py.
+ _load_os_metadatas(force_reload=force_reload)
+ _load_package_metadatas(force_reload=force_reload)
+ _load_flavor_metadatas(force_reload=force_reload)
+ _load_os_metadata_ui_converters(force_reload=force_reload)
+ _load_flavor_metadata_ui_converters(force_reload=force_reload)
+
+
+def _load_os_metadata_ui_converters(force_reload=False):
+ global OS_METADATA_UI_CONVERTERS
+ if force_reload or OS_METADATA_UI_CONVERTERS is None:
+ logging.info('load os metadatas ui converters into memory')
+ OS_METADATA_UI_CONVERTERS = (
+ metadata_api.get_oses_metadata_ui_converters_internal(
+ force_reload=force_reload
+ )
+ )
+
+
+def _load_os_metadatas(force_reload=False):
+ """Load os metadata from inmemory db and map it by os_id."""
+ global OS_METADATA_MAPPING
+ if force_reload or OS_METADATA_MAPPING is None:
+ logging.info('load os metadatas into memory')
+ OS_METADATA_MAPPING = metadata_api.get_oses_metadata_internal(
+ force_reload=force_reload
+ )
+
+
+def _load_flavor_metadata_ui_converters(force_reload=False):
+ """Load flavor metadata ui converters from inmemory db.
+
+ The loaded converters are keyed by '<adapter_name>:<flavor_name>'.
+ """
+ global FLAVOR_METADATA_UI_CONVERTERS
+ if force_reload or FLAVOR_METADATA_UI_CONVERTERS is None:
+ logging.info('load flavor metadata ui converters into memory')
+ FLAVOR_METADATA_UI_CONVERTERS = {}
+ adapters_flavors_metadata_ui_converters = (
+ metadata_api.get_flavors_metadata_ui_converters_internal(
+ force_reload=force_reload
+ )
+ )
+ for adapter_name, adapter_flavors_metadata_ui_converters in (
+ adapters_flavors_metadata_ui_converters.items()
+ ):
+ for flavor_name, flavor_metadata_ui_converter in (
+ adapter_flavors_metadata_ui_converters.items()
+ ):
+ FLAVOR_METADATA_UI_CONVERTERS[
+ '%s:%s' % (adapter_name, flavor_name)
+ ] = flavor_metadata_ui_converter
+
+
+@util.deprecated
+def _load_package_metadatas(force_reload=False):
+ """Load deployable package metadata from inmemory db."""
+ global PACKAGE_METADATA_MAPPING
+ if force_reload or PACKAGE_METADATA_MAPPING is None:
+ logging.info('load package metadatas into memory')
+ PACKAGE_METADATA_MAPPING = (
+ metadata_api.get_packages_metadata_internal(
+ force_reload=force_reload
+ )
+ )
+
+
+def _load_flavor_metadatas(force_reload=False):
+ """Load flavor metadata from inmemory db.
+
+ The loaded metadata are keyed by '<adapter_name>:<flavor_name>'.
+ """
+ global FLAVOR_METADATA_MAPPING
+ if force_reload or FLAVOR_METADATA_MAPPING is None:
+ logging.info('load flavor metadatas into memory')
+ FLAVOR_METADATA_MAPPING = {}
+ adapters_flavors_metadata = (
+ metadata_api.get_flavors_metadata_internal(
+ force_reload=force_reload
+ )
+ )
+ for adapter_name, adapter_flavors_metadata in (
+ adapters_flavors_metadata.items()
+ ):
+ for flavor_name, flavor_metadata in (
+ adapter_flavors_metadata.items()
+ ):
+ FLAVOR_METADATA_MAPPING[
+ '%s:%s' % (adapter_name, flavor_name)
+ ] = flavor_metadata
+
+
+OS_METADATA_MAPPING = None
+PACKAGE_METADATA_MAPPING = None
+FLAVOR_METADATA_MAPPING = None
+OS_METADATA_UI_CONVERTERS = None
+FLAVOR_METADATA_UI_CONVERTERS = None
+
+
+def validate_os_config(
+ config, os_id, whole_check=False, **kwargs
+):
+ """Validate os config."""
+ load_metadatas()
+ if os_id not in OS_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'os %s is not found in os metadata mapping' % os_id
+ )
+ _validate_config(
+ '', config, OS_METADATA_MAPPING[os_id],
+ whole_check, **kwargs
+ )
+
+
+@util.deprecated
+def validate_package_config(
+ config, adapter_id, whole_check=False, **kwargs
+):
+ """Validate package config."""
+ load_metadatas()
+ if adapter_id not in PACKAGE_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'adapter %s is not found in package metadata mapping' % adapter_id
+ )
+ _validate_config(
+ '', config, PACKAGE_METADATA_MAPPING[adapter_id],
+ whole_check, **kwargs
+ )
+
+
+def validate_flavor_config(
+ config, flavor_id, whole_check=False, **kwargs
+):
+ """Validate flavor config."""
+ load_metadatas()
+ if not flavor_id:
+ logging.info('There is no flavor, skipping flavor validation...')
+ elif flavor_id not in FLAVOR_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'flavor %s is not found in flavor metadata mapping' % flavor_id
+ )
+ else:
+ _validate_config(
+ '', config, FLAVOR_METADATA_MAPPING[flavor_id],
+ whole_check, **kwargs
+ )
+
+
+def _filter_metadata(metadata, **kwargs):
+ """Filter metadata before return it to api.
+
+ Some metadata fields are not json compatible or are
+ only used internally in db/api.
+ We should strip these fields out before returning to the api.
+ """
+ if not isinstance(metadata, dict):
+ return metadata
+ filtered_metadata = {}
+ for key, value in metadata.items():
+ if key == '_self':
+ filtered_metadata[key] = {
+ 'name': value['name'],
+ 'description': value.get('description', None),
+ 'default_value': value.get('default_value', None),
+ 'is_required': value.get('is_required', False),
+ 'required_in_whole_config': value.get(
+ 'required_in_whole_config', False),
+ 'js_validator': value.get('js_validator', None),
+ 'options': value.get('options', None),
+ 'required_in_options': value.get(
+ 'required_in_options', False),
+ 'field_type': value.get(
+ 'field_type_data', 'str'),
+ 'display_type': value.get('display_type', None),
+ 'mapping_to': value.get('mapping_to', None)
+ }
+ else:
+ filtered_metadata[key] = _filter_metadata(value, **kwargs)
+ return filtered_metadata
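+ # Sketch: a '_self' entry whose 'validator' is a callable (and thus not
+ # json serializable) comes out holding only the whitelisted keys above,
+ # e.g. something like
+ #     {'name': 'dns', 'is_required': True, 'field_type': 'str', ...}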
+
+
+@util.deprecated
+def _get_package_metadata(adapter_id):
+ """get package metadata."""
+ load_metadatas()
+ if adapter_id not in PACKAGE_METADATA_MAPPING:
+ raise exception.RecordNotExists(
+ 'adapter %s does not exist' % adapter_id
+ )
+ return _filter_metadata(
+ PACKAGE_METADATA_MAPPING[adapter_id]
+ )
+
+
+@util.deprecated
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_package_metadata(adapter_id, user=None, session=None, **kwargs):
+ """Get package metadata from adapter."""
+ return {
+ 'package_config': _get_package_metadata(adapter_id)
+ }
+
+
+def _get_flavor_metadata(flavor_id):
+ """get flavor metadata."""
+ load_metadatas()
+ if not flavor_id:
+ logging.info('There is no flavor id, skipping...')
+ elif flavor_id not in FLAVOR_METADATA_MAPPING:
+ raise exception.RecordNotExists(
+ 'flavor %s does not exist' % flavor_id
+ )
+ else:
+ return _filter_metadata(FLAVOR_METADATA_MAPPING[flavor_id])
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_flavor_metadata(flavor_id, user=None, session=None, **kwargs):
+ """Get flavor metadata by flavor."""
+ return {
+ 'package_config': _get_flavor_metadata(flavor_id)
+ }
+
+
+def _get_os_metadata(os_id):
+ """get os metadata."""
+ load_metadatas()
+ if os_id not in OS_METADATA_MAPPING:
+ raise exception.RecordNotExists(
+ 'os %s does not exist' % os_id
+ )
+ return _filter_metadata(OS_METADATA_MAPPING[os_id])
+
+
+def _get_os_metadata_ui_converter(os_id):
+ """get os metadata ui converter."""
+ load_metadatas()
+ if os_id not in OS_METADATA_UI_CONVERTERS:
+ raise exception.RecordNotExists(
+ 'os %s does not exist' % os_id
+ )
+ return OS_METADATA_UI_CONVERTERS[os_id]
+
+
+def _get_flavor_metadata_ui_converter(flavor_id):
+ """get flavor metadata ui converter."""
+ load_metadatas()
+ if flavor_id not in FLAVOR_METADATA_UI_CONVERTERS:
+ raise exception.RecordNotExists(
+ 'flavor %s does not exist' % flavor_id
+ )
+ return FLAVOR_METADATA_UI_CONVERTERS[flavor_id]
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_os_metadata(os_id, user=None, session=None, **kwargs):
+ """get os metadatas."""
+ return {'os_config': _get_os_metadata(os_id)}
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
+def get_os_ui_metadata(os_id, user=None, session=None, **kwargs):
+ """Get os metadata ui converter by os."""
+ metadata = _get_os_metadata(os_id)
+ metadata_ui_converter = _get_os_metadata_ui_converter(os_id)
+ return _get_ui_metadata(metadata, metadata_ui_converter)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
+def get_flavor_ui_metadata(flavor_id, user=None, session=None, **kwargs):
+ """Get flavor ui metadata by flavor."""
+ metadata = _get_flavor_metadata(flavor_id)
+ metadata_ui_converter = _get_flavor_metadata_ui_converter(flavor_id)
+ return _get_ui_metadata(metadata, metadata_ui_converter)
+
+
+def _get_ui_metadata(metadata, metadata_ui_converter):
+ """convert metadata to ui metadata.
+
+ Args:
+ metadata: metadata we defined in metadata files.
+ metadata_ui_converter: metadata ui converter defined in metadata
+ mapping files. Used to convert orignal
+ metadata to ui understandable metadata.
+
+ Returns:
+ ui understandable metadata.
+ """
+ ui_metadata = {}
+ ui_metadata[metadata_ui_converter['mapped_name']] = []
+ for mapped_child in metadata_ui_converter['mapped_children']:
+ data_dict = {}
+ for ui_key, ui_value in mapped_child.items():
+ for key, value in ui_value.items():
+ if 'data' == key:
+ result_data = []
+ _get_ui_metadata_data(
+ metadata[ui_key], value, result_data
+ )
+ data_dict['data'] = result_data
+ else:
+ data_dict[key] = value
+ ui_metadata[metadata_ui_converter['mapped_name']].append(data_dict)
+ return ui_metadata
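+ # Sketch of the converter shape consumed above (all names hypothetical):
+ #     metadata_ui_converter = {
+ #         'mapped_name': 'flavor_config',
+ #         'mapped_children': [{
+ #             'security': {
+ #                 'data': {...},  # converted via _get_ui_metadata_data
+ #                 'accordion_heading': 'Security'
+ #             }
+ #         }]
+ #     }
+ # Here 'security' is looked up in metadata, its 'data' subtree is
+ # converted, and every other key is copied through unchanged.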
+
+
+def _get_ui_metadata_data(metadata, config, result_data):
+ """Get ui metadata data and fill to result."""
+ data_dict = {}
+ for key, config_value in config.items():
+ if isinstance(config_value, dict) and key != 'content_data':
+ if key in metadata.keys():
+ _get_ui_metadata_data(metadata[key], config_value, result_data)
+ else:
+ _get_ui_metadata_data(metadata, config_value, result_data)
+ elif isinstance(config_value, list):
+ option_list = []
+ for item in config_value:
+ if isinstance(item, dict):
+ option_list.append(item)
+ data_dict[key] = option_list
+ else:
+ if isinstance(metadata['_self'][item], bool):
+ data_dict[item] = str(metadata['_self'][item]).lower()
+ else:
+ data_dict[item] = metadata['_self'][item]
+ else:
+ data_dict[key] = config_value
+ if data_dict:
+ result_data.append(data_dict)
+ return result_data
+
+
+@util.deprecated
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_package_os_metadata(
+ adapter_id, os_id,
+ user=None, session=None, **kwargs
+):
+ """Get metadata by adapter and os."""
+ adapter = adapter_holder_api.get_adapter(
+ adapter_id, user=user, session=session
+ )
+ os_ids = [os['id'] for os in adapter['supported_oses']]
+ if os_id not in os_ids:
+ raise exception.InvalidParameter(
+ 'os %s is not in the supported os list of adapter %s' % (
+ os_id, adapter_id
+ )
+ )
+ metadatas = {}
+ metadatas['os_config'] = _get_os_metadata(
+ os_id
+ )
+ metadatas['package_config'] = _get_package_metadata(
+ adapter_id
+ )
+ return metadatas
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_flavor_os_metadata(
+ flavor_id, os_id,
+ user=None, session=None, **kwargs
+):
+ """Get metadata by flavor and os."""
+ flavor = adapter_holder_api.get_flavor(
+ flavor_id, user=user, session=session
+ )
+ adapter_id = flavor['adapter_id']
+ adapter = adapter_holder_api.get_adapter(
+ adapter_id, user=user, session=session
+ )
+ os_ids = [os['id'] for os in adapter['supported_oses']]
+ if os_id not in os_ids:
+ raise exception.InvalidParameter(
+ 'os %s is not in the supported os list of adapter %s' % (
+ os_id, adapter_id
+ )
+ )
+ metadatas = {}
+ metadatas['os_config'] = _get_os_metadata(
+ os_id
+ )
+ metadatas['package_config'] = _get_flavor_metadata(
+ flavor_id
+ )
+ return metadatas
+
+
+def _validate_self(
+ config_path, config_key, config,
+ metadata, whole_check,
+ **kwargs
+):
+ """validate config by metadata self section."""
+ logging.debug('validate config self %s', config_path)
+ if '_self' not in metadata:
+ if isinstance(config, dict):
+ _validate_config(
+ config_path, config, metadata, whole_check, **kwargs
+ )
+ return
+ field_type = metadata['_self'].get('field_type', basestring)
+ if not isinstance(config, field_type):
+ raise exception.InvalidParameter(
+ '%s config type is not %s: %s' % (config_path, field_type, config)
+ )
+ is_required = metadata['_self'].get(
+ 'is_required', False
+ )
+ required_in_whole_config = metadata['_self'].get(
+ 'required_in_whole_config', False
+ )
+ if isinstance(config, basestring):
+ if config == '' and not is_required and not required_in_whole_config:
+ # ignore empty config when it is optional
+ return
+ required_in_options = metadata['_self'].get(
+ 'required_in_options', False
+ )
+ options = metadata['_self'].get('options', None)
+ if required_in_options:
+ if field_type in [int, basestring, float, bool]:
+ if options and config not in options:
+ raise exception.InvalidParameter(
+ '%s config is not in %s: %s' % (
+ config_path, options, config
+ )
+ )
+ elif field_type in [list, tuple]:
+ if options and not set(config).issubset(set(options)):
+ raise exception.InvalidParameter(
+ '%s config is not in %s: %s' % (
+ config_path, options, config
+ )
+ )
+ elif field_type == dict:
+ if options and not set(config.keys()).issubset(set(options)):
+ raise exception.InvalidParameter(
+ '%s config is not in %s: %s' % (
+ config_path, options, config
+ )
+ )
+ validator = metadata['_self'].get('validator', None)
+ logging.debug('validate by validator %s', validator)
+ if validator:
+ if not validator(config_key, config, **kwargs):
+ raise exception.InvalidParameter(
+ '%s config is invalid' % config_path
+ )
+ if isinstance(config, dict):
+ _validate_config(
+ config_path, config, metadata, whole_check, **kwargs
+ )
+
+
+def _validate_config(
+ config_path, config, metadata, whole_check,
+ **kwargs
+):
+ """validate config by metadata."""
+ logging.debug('validate config %s', config_path)
+ generals = {}
+ specified = {}
+ for key, value in metadata.items():
+ if key.startswith('$'):
+ generals[key] = value
+ elif key.startswith('_'):
+ pass
+ else:
+ specified[key] = value
+ config_keys = set(config.keys())
+ specified_keys = set(specified.keys())
+ intersect_keys = config_keys & specified_keys
+ not_found_keys = config_keys - specified_keys
+ redundant_keys = specified_keys - config_keys
+ for key in redundant_keys:
+ if '_self' not in specified[key]:
+ continue
+ if specified[key]['_self'].get('is_required', False):
+ raise exception.InvalidParameter(
+ '%s/%s not found but it is required' % (
+ config_path, key
+ )
+ )
+ if (
+ whole_check and
+ specified[key]['_self'].get(
+ 'required_in_whole_config', False
+ )
+ ):
+ raise exception.InvalidParameter(
+ '%s/%s not found but it is required in whole config' % (
+ config_path, key
+ )
+ )
+ for key in intersect_keys:
+ _validate_self(
+ '%s/%s' % (config_path, key),
+ key, config[key], specified[key], whole_check,
+ **kwargs
+ )
+ for key in not_found_keys:
+ if not generals:
+ raise exception.InvalidParameter(
+ 'key %s missing in metadata %s' % (
+ key, config_path
+ )
+ )
+ for general_key, general_value in generals.items():
+ _validate_self(
+ '%s/%s' % (config_path, key),
+ key, config[key], general_value, whole_check,
+ **kwargs
+ )
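+ # A sketch of the key classification above, with hypothetical metadata:
+ #     metadata = {'_self': {...}, 'dns': {...}, '$interface': {...}}
+ #     config = {'dns': '8.8.8.8', 'eth0': {...}}
+ # 'dns' is validated against the specified 'dns' metadata, while 'eth0'
+ # has no specified entry, so it is validated against the general
+ # '$interface' metadata.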
+
+
+def _autofill_self_config(
+ config_path, config_key, config,
+ metadata,
+ **kwargs
+):
+ """Autofill config by metadata self section."""
+ if '_self' not in metadata:
+ if isinstance(config, dict):
+ _autofill_config(
+ config_path, config, metadata, **kwargs
+ )
+ return config
+ logging.debug(
+ 'autofill %s by metadata %s', config_path, metadata['_self']
+ )
+ autofill_callback = metadata['_self'].get(
+ 'autofill_callback', None
+ )
+ autofill_callback_params = metadata['_self'].get(
+ 'autofill_callback_params', {}
+ )
+ callback_params = dict(kwargs)
+ if autofill_callback_params:
+ callback_params.update(autofill_callback_params)
+ default_value = metadata['_self'].get(
+ 'default_value', None
+ )
+ if default_value is not None:
+ callback_params['default_value'] = default_value
+ options = metadata['_self'].get(
+ 'options', None
+ )
+ if options is not None:
+ callback_params['options'] = options
+ if autofill_callback:
+ config = autofill_callback(
+ config_key, config, **callback_params
+ )
+ if config is None:
+ new_config = {}
+ else:
+ new_config = config
+ if isinstance(new_config, dict):
+ _autofill_config(
+ config_path, new_config, metadata, **kwargs
+ )
+ if new_config:
+ config = new_config
+ return config
+
+
+def _autofill_config(
+ config_path, config, metadata, **kwargs
+):
+ """autofill config by metadata."""
+ generals = {}
+ specified = {}
+ for key, value in metadata.items():
+ if key.startswith('$'):
+ generals[key] = value
+ elif key.startswith('_'):
+ pass
+ else:
+ specified[key] = value
+ config_keys = set(config.keys())
+ specified_keys = set(specified.keys())
+ intersect_keys = config_keys & specified_keys
+ not_found_keys = config_keys - specified_keys
+ redundant_keys = specified_keys - config_keys
+ for key in redundant_keys:
+ self_config = _autofill_self_config(
+ '%s/%s' % (config_path, key),
+ key, None, specified[key], **kwargs
+ )
+ if self_config is not None:
+ config[key] = self_config
+ for key in intersect_keys:
+ config[key] = _autofill_self_config(
+ '%s/%s' % (config_path, key),
+ key, config[key], specified[key],
+ **kwargs
+ )
+ for key in not_found_keys:
+ for general_key, general_value in generals.items():
+ config[key] = _autofill_self_config(
+ '%s/%s' % (config_path, key),
+ key, config[key], general_value,
+ **kwargs
+ )
+ return config
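+ # Sketch: for a key declared in metadata but absent from the config,
+ # _autofill_self_config is called with config=None, giving an
+ # autofill_callback (with any default_value/options passed through
+ # callback_params) the chance to create the entry from scratch.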
+
+
+def autofill_os_config(
+ config, os_id, **kwargs
+):
+ load_metadatas()
+ if os_id not in OS_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'os %s is not found in os metadata mapping' % os_id
+ )
+
+ return _autofill_config(
+ '', config, OS_METADATA_MAPPING[os_id], **kwargs
+ )
+
+
+def autofill_package_config(
+ config, adapter_id, **kwargs
+):
+ load_metadatas()
+ if adapter_id not in PACKAGE_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'adapter %s is not found in package metadata mapping' % adapter_id
+ )
+
+ return _autofill_config(
+ '', config, PACKAGE_METADATA_MAPPING[adapter_id], **kwargs
+ )
+
+
+def autofill_flavor_config(
+ config, flavor_id, **kwargs
+):
+ load_metadatas()
+ if not flavor_id:
+ logging.info('There is no flavor, skipping...')
+ elif flavor_id not in FLAVOR_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'flavor %s is not found in flavor metadata mapping' % flavor_id
+ )
+ else:
+ return _autofill_config(
+ '', config, FLAVOR_METADATA_MAPPING[flavor_id], **kwargs
+ )
diff --git a/compass-deck/db/api/network.py b/compass-deck/db/api/network.py
new file mode 100644
index 0000000..e2bf7d3
--- /dev/null
+++ b/compass-deck/db/api/network.py
@@ -0,0 +1,160 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Network related database operations."""
+import logging
+import netaddr
+import re
+
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+
+SUPPORTED_FIELDS = ['subnet', 'name']
+RESP_FIELDS = [
+ 'id', 'name', 'subnet', 'created_at', 'updated_at'
+]
+ADDED_FIELDS = ['subnet']
+OPTIONAL_ADDED_FIELDS = ['name']
+IGNORE_FIELDS = [
+ 'id', 'created_at', 'updated_at'
+]
+UPDATED_FIELDS = ['subnet', 'name']
+
+
+def _check_subnet(subnet):
+ """Check subnet format is correct."""
+ try:
+ netaddr.IPNetwork(subnet)
+ except Exception as error:
+ logging.exception(error)
+ raise exception.InvalidParameter(
+ 'subnet %s format unrecognized' % subnet)
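+ # Sketch of accepted vs rejected input (hypothetical values):
+ #     _check_subnet('10.145.88.0/23')  # passes
+ #     _check_subnet('not-a-subnet')    # raises InvalidParameter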
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SUBNETS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_subnets(user=None, session=None, **filters):
+ """List subnets."""
+ return utils.list_db_objects(
+ session, models.Subnet, **filters
+ )
+
+
+def _get_subnet(subnet_id, session=None, **kwargs):
+ """Get subnet by subnet id."""
+ if isinstance(subnet_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Subnet,
+ id=subnet_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'subnet id %s type is not int compatible' % subnet_id
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SUBNETS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_subnet(
+ subnet_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get subnet info."""
+ return _get_subnet(
+ subnet_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(
+ ADDED_FIELDS, optional_support_keys=OPTIONAL_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(subnet=_check_subnet)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SUBNET
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_subnet(
+ exception_when_existing=True, subnet=None,
+ user=None, session=None, **kwargs
+):
+ """Create a subnet."""
+ return utils.add_db_object(
+ session, models.Subnet,
+ exception_when_existing, subnet, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(subnet=_check_subnet)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SUBNET
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_subnet(subnet_id, user=None, session=None, **kwargs):
+ """Update a subnet."""
+ subnet = _get_subnet(
+ subnet_id, session=session
+ )
+ return utils.update_db_object(session, subnet, **kwargs)
+
+
+def _check_subnet_deletable(subnet):
+ """Check a subnet deletable."""
+ if subnet.host_networks:
+ host_networks = [
+ '%s:%s=%s' % (
+ host_network.host.name, host_network.interface,
+ host_network.ip
+ )
+ for host_network in subnet.host_networks
+ ]
+ raise exception.NotAcceptable(
+ 'subnet %s contains host networks %s' % (
+ subnet.subnet, host_networks
+ )
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_SUBNET
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_subnet(subnet_id, user=None, session=None, **kwargs):
+ """Delete a subnet."""
+ subnet = _get_subnet(
+ subnet_id, session=session
+ )
+ _check_subnet_deletable(subnet)
+ return utils.del_db_object(session, subnet)
diff --git a/compass-deck/db/api/permission.py b/compass-deck/db/api/permission.py
new file mode 100644
index 0000000..f4d777a
--- /dev/null
+++ b/compass-deck/db/api/permission.py
@@ -0,0 +1,357 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Permission database operations."""
+import re
+
+from compass.db.api import database
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = ['id', 'name', 'alias', 'description']
+RESP_FIELDS = ['id', 'name', 'alias', 'description']
+
+
+class PermissionWrapper(object):
+ def __init__(self, name, alias, description):
+ self.name = name
+ self.alias = alias
+ self.description = description
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'alias': self.alias,
+ 'description': self.description
+ }
+
+
+PERMISSION_LIST_PERMISSIONS = PermissionWrapper(
+ 'list_permissions', 'list permissions', 'list all permissions'
+)
+PERMISSION_LIST_SWITCHES = PermissionWrapper(
+ 'list_switches', 'list switches', 'list all switches'
+)
+PERMISSION_LIST_SWITCH_FILTERS = PermissionWrapper(
+ 'list_switch_filters',
+ 'list switch filters',
+ 'list switch filters'
+)
+PERMISSION_ADD_SWITCH = PermissionWrapper(
+ 'add_switch', 'add switch', 'add switch'
+)
+PERMISSION_UPDATE_SWITCH_FILTERS = PermissionWrapper(
+ 'update_switch_filters',
+ 'update switch filters',
+ 'update switch filters'
+)
+PERMISSION_DEL_SWITCH = PermissionWrapper(
+ 'delete_switch', 'delete switch', 'delete switch'
+)
+PERMISSION_LIST_SWITCH_MACHINES = PermissionWrapper(
+ 'list_switch_machines', 'list switch machines', 'list switch machines'
+)
+PERMISSION_ADD_SWITCH_MACHINE = PermissionWrapper(
+ 'add_switch_machine', 'add switch machine', 'add switch machine'
+)
+PERMISSION_DEL_SWITCH_MACHINE = PermissionWrapper(
+ 'del_switch_machine', 'delete switch machine', 'del switch machine'
+)
+PERMISSION_UPDATE_SWITCH_MACHINES = PermissionWrapper(
+ 'update_switch_machines',
+ 'update switch machines',
+ 'update switch machines'
+)
+PERMISSION_LIST_MACHINES = PermissionWrapper(
+ 'list_machines', 'list machines', 'list machines'
+)
+PERMISSION_ADD_MACHINE = PermissionWrapper(
+ 'add_machine', 'add machine', 'add machine'
+)
+PERMISSION_DEL_MACHINE = PermissionWrapper(
+ 'delete_machine', 'delete machine', 'delete machine'
+)
+PERMISSION_LIST_ADAPTERS = PermissionWrapper(
+ 'list_adapters', 'list adapters', 'list adapters'
+)
+PERMISSION_LIST_METADATAS = PermissionWrapper(
+ 'list_metadatas', 'list metadatas', 'list metadatas'
+)
+PERMISSION_LIST_SUBNETS = PermissionWrapper(
+ 'list_subnets', 'list subnets', 'list subnets'
+)
+PERMISSION_ADD_SUBNET = PermissionWrapper(
+ 'add_subnet', 'add subnet', 'add subnet'
+)
+PERMISSION_DEL_SUBNET = PermissionWrapper(
+ 'del_subnet', 'del subnet', 'del subnet'
+)
+PERMISSION_LIST_CLUSTERS = PermissionWrapper(
+ 'list_clusters', 'list clusters', 'list clusters'
+)
+PERMISSION_ADD_CLUSTER = PermissionWrapper(
+ 'add_cluster', 'add cluster', 'add cluster'
+)
+PERMISSION_DEL_CLUSTER = PermissionWrapper(
+ 'del_cluster', 'del cluster', 'del cluster'
+)
+PERMISSION_LIST_CLUSTER_CONFIG = PermissionWrapper(
+ 'list_cluster_config', 'list cluster config', 'list cluster config'
+)
+PERMISSION_ADD_CLUSTER_CONFIG = PermissionWrapper(
+ 'add_cluster_config', 'add cluster config', 'add cluster config'
+)
+PERMISSION_DEL_CLUSTER_CONFIG = PermissionWrapper(
+ 'del_cluster_config', 'del cluster config', 'del cluster config'
+)
+PERMISSION_UPDATE_CLUSTER_HOSTS = PermissionWrapper(
+ 'update_cluster_hosts',
+ 'update cluster hosts',
+ 'update cluster hosts'
+)
+PERMISSION_DEL_CLUSTER_HOST = PermissionWrapper(
+ 'del_clusterhost', 'delete clusterhost', 'delete clusterhost'
+)
+PERMISSION_REVIEW_CLUSTER = PermissionWrapper(
+ 'review_cluster', 'review cluster', 'review cluster'
+)
+PERMISSION_DEPLOY_CLUSTER = PermissionWrapper(
+ 'deploy_cluster', 'deploy cluster', 'deploy cluster'
+)
+PERMISSION_DEPLOY_HOST = PermissionWrapper(
+ 'deploy_host', 'deploy host', 'deploy host'
+)
+PERMISSION_GET_CLUSTER_STATE = PermissionWrapper(
+ 'get_cluster_state', 'get cluster state', 'get cluster state'
+)
+PERMISSION_UPDATE_CLUSTER_STATE = PermissionWrapper(
+ 'update_cluster_state', 'update cluster state',
+ 'update cluster state'
+)
+PERMISSION_LIST_HOSTS = PermissionWrapper(
+ 'list_hosts', 'list hosts', 'list hosts'
+)
+PERMISSION_LIST_HOST_CLUSTERS = PermissionWrapper(
+ 'list_host_clusters',
+ 'list host clusters',
+ 'list host clusters'
+)
+PERMISSION_UPDATE_HOST = PermissionWrapper(
+ 'update_host', 'update host', 'update host'
+)
+PERMISSION_DEL_HOST = PermissionWrapper(
+ 'del_host', 'del host', 'del host'
+)
+PERMISSION_LIST_HOST_CONFIG = PermissionWrapper(
+ 'list_host_config', 'list host config', 'list host config'
+)
+PERMISSION_ADD_HOST_CONFIG = PermissionWrapper(
+ 'add_host_config', 'add host config', 'add host config'
+)
+PERMISSION_DEL_HOST_CONFIG = PermissionWrapper(
+ 'del_host_config', 'del host config', 'del host config'
+)
+PERMISSION_LIST_HOST_NETWORKS = PermissionWrapper(
+ 'list_host_networks',
+ 'list host networks',
+ 'list host networks'
+)
+PERMISSION_ADD_HOST_NETWORK = PermissionWrapper(
+ 'add_host_network', 'add host network', 'add host network'
+)
+PERMISSION_DEL_HOST_NETWORK = PermissionWrapper(
+ 'del_host_network', 'del host network', 'del host network'
+)
+PERMISSION_GET_HOST_STATE = PermissionWrapper(
+ 'get_host_state', 'get host state', 'get host state'
+)
+PERMISSION_UPDATE_HOST_STATE = PermissionWrapper(
+ 'update_host_state', 'update host state', 'update host state'
+)
+PERMISSION_LIST_CLUSTERHOSTS = PermissionWrapper(
+ 'list_clusterhosts', 'list cluster hosts', 'list cluster hosts'
+)
+PERMISSION_LIST_CLUSTERHOST_CONFIG = PermissionWrapper(
+ 'list_clusterhost_config',
+ 'list clusterhost config',
+ 'list clusterhost config'
+)
+PERMISSION_ADD_CLUSTERHOST_CONFIG = PermissionWrapper(
+ 'add_clusterhost_config',
+ 'add clusterhost config',
+ 'add clusterhost config'
+)
+PERMISSION_DEL_CLUSTERHOST_CONFIG = PermissionWrapper(
+ 'del_clusterhost_config',
+ 'del clusterhost config',
+ 'del clusterhost config'
+)
+PERMISSION_GET_CLUSTERHOST_STATE = PermissionWrapper(
+ 'get_clusterhost_state',
+ 'get clusterhost state',
+ 'get clusterhost state'
+)
+PERMISSION_UPDATE_CLUSTERHOST_STATE = PermissionWrapper(
+ 'update_clusterhost_state',
+ 'update clusterhost state',
+ 'update clusterhost state'
+)
+PERMISSION_LIST_HEALTH_REPORT = PermissionWrapper(
+ 'list_health_reports',
+ 'list health check report',
+ 'list health check report'
+)
+PERMISSION_GET_HEALTH_REPORT = PermissionWrapper(
+ 'get_health_report',
+ 'get health report',
+ 'get health report'
+)
+PERMISSION_CHECK_CLUSTER_HEALTH = PermissionWrapper(
+ 'start_check_cluster_health',
+ 'start check cluster health',
+ 'start check cluster health'
+)
+PERMISSION_SET_HEALTH_CHECK_ERROR = PermissionWrapper(
+ 'set_error_state',
+ 'set health check into error state',
+ 'set health check into error state'
+)
+PERMISSION_DELETE_REPORT = PermissionWrapper(
+ 'delete_reports',
+ 'delete health reports',
+ 'delete health reports'
+)
+PERMISSIONS = [
+ PERMISSION_LIST_PERMISSIONS,
+ PERMISSION_LIST_SWITCHES,
+ PERMISSION_ADD_SWITCH,
+ PERMISSION_DEL_SWITCH,
+ PERMISSION_LIST_SWITCH_FILTERS,
+ PERMISSION_UPDATE_SWITCH_FILTERS,
+ PERMISSION_LIST_SWITCH_MACHINES,
+ PERMISSION_ADD_SWITCH_MACHINE,
+ PERMISSION_DEL_SWITCH_MACHINE,
+ PERMISSION_UPDATE_SWITCH_MACHINES,
+ PERMISSION_LIST_MACHINES,
+ PERMISSION_ADD_MACHINE,
+ PERMISSION_DEL_MACHINE,
+ PERMISSION_LIST_ADAPTERS,
+ PERMISSION_LIST_METADATAS,
+ PERMISSION_LIST_SUBNETS,
+ PERMISSION_ADD_SUBNET,
+ PERMISSION_DEL_SUBNET,
+ PERMISSION_LIST_CLUSTERS,
+ PERMISSION_ADD_CLUSTER,
+ PERMISSION_DEL_CLUSTER,
+ PERMISSION_LIST_CLUSTER_CONFIG,
+ PERMISSION_ADD_CLUSTER_CONFIG,
+ PERMISSION_DEL_CLUSTER_CONFIG,
+ PERMISSION_UPDATE_CLUSTER_HOSTS,
+ PERMISSION_DEL_CLUSTER_HOST,
+ PERMISSION_REVIEW_CLUSTER,
+ PERMISSION_DEPLOY_CLUSTER,
+ PERMISSION_GET_CLUSTER_STATE,
+ PERMISSION_UPDATE_CLUSTER_STATE,
+ PERMISSION_LIST_HOSTS,
+ PERMISSION_LIST_HOST_CLUSTERS,
+ PERMISSION_UPDATE_HOST,
+ PERMISSION_DEL_HOST,
+ PERMISSION_LIST_HOST_CONFIG,
+ PERMISSION_ADD_HOST_CONFIG,
+ PERMISSION_DEL_HOST_CONFIG,
+ PERMISSION_LIST_HOST_NETWORKS,
+ PERMISSION_ADD_HOST_NETWORK,
+ PERMISSION_DEL_HOST_NETWORK,
+ PERMISSION_GET_HOST_STATE,
+ PERMISSION_UPDATE_HOST_STATE,
+ PERMISSION_DEPLOY_HOST,
+ PERMISSION_LIST_CLUSTERHOSTS,
+ PERMISSION_LIST_CLUSTERHOST_CONFIG,
+ PERMISSION_ADD_CLUSTERHOST_CONFIG,
+ PERMISSION_DEL_CLUSTERHOST_CONFIG,
+ PERMISSION_GET_CLUSTERHOST_STATE,
+ PERMISSION_UPDATE_CLUSTERHOST_STATE,
+ PERMISSION_LIST_HEALTH_REPORT,
+ PERMISSION_GET_HEALTH_REPORT,
+ PERMISSION_CHECK_CLUSTER_HEALTH,
+ PERMISSION_SET_HEALTH_CHECK_ERROR,
+ PERMISSION_DELETE_REPORT
+]
+
+
+@util.deprecated
+def list_permissions_internal(session, **filters):
+ """internal functions used only by other db.api modules."""
+ return utils.list_db_objects(session, models.Permission, **filters)
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_permissions(user=None, session=None, **filters):
+ """list permissions."""
+ return utils.list_db_objects(
+ session, models.Permission, **filters
+ )
+
+
+def _get_permission(permission_id, session=None, **kwargs):
+ """Get permission object by the unique key of Permission table."""
+ if isinstance(permission_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Permission, id=permission_id, **kwargs)
+ raise exception.InvalidParameter(
+ 'permission id %s type is not int compatible' % permission_id
+ )
+
+
+def get_permission_internal(permission_id, session=None, **kwargs):
+ return _get_permission(permission_id, session=session, **kwargs)
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_permission(
+ permission_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get permissions."""
+ return _get_permission(
+ permission_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+def add_permissions_internal(session=None):
+ """internal functions used by other db.api modules only."""
+ permissions = []
+ for permission in PERMISSIONS:
+ permissions.append(
+ utils.add_db_object(
+ session, models.Permission,
+ True,
+ permission.name,
+ alias=permission.alias,
+ description=permission.description
+ )
+ )
+
+ return permissions
diff --git a/compass-deck/db/api/switch.py b/compass-deck/db/api/switch.py
new file mode 100644
index 0000000..647eec0
--- /dev/null
+++ b/compass-deck/db/api/switch.py
@@ -0,0 +1,1213 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Switch database operations."""
+import logging
+import netaddr
+import re
+
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = ['ip_int', 'vendor', 'state']
+SUPPORTED_FILTER_FIELDS = ['ip_int', 'vendor', 'state']
+SUPPORTED_SWITCH_MACHINES_FIELDS = [
+ 'switch_ip_int', 'port', 'vlans', 'mac', 'tag', 'location',
+ 'owner_id'
+]
+SUPPORTED_MACHINES_FIELDS = [
+ 'port', 'vlans', 'mac', 'tag', 'location', 'owner_id'
+]
+SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS = [
+ 'switch_ip_int', 'port', 'vlans', 'mac',
+ 'tag', 'location', 'os_name'
+]
+SUPPORTED_MACHINES_HOSTS_FIELDS = [
+ 'port', 'vlans', 'mac', 'tag', 'location',
+ 'os_name'
+]
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+ADDED_FIELDS = ['ip']
+OPTIONAL_ADDED_FIELDS = [
+ 'credentials', 'vendor', 'state', 'err_msg', 'machine_filters'
+]
+UPDATED_FIELDS = [
+ 'ip', 'credentials', 'vendor', 'state',
+ 'err_msg', 'put_machine_filters'
+]
+PATCHED_FIELDS = ['patched_credentials', 'patched_machine_filters']
+UPDATED_FILTERS_FIELDS = ['put_machine_filters']
+PATCHED_FILTERS_FIELDS = ['patched_machine_filters']
+ADDED_MACHINES_FIELDS = ['mac']
+OPTIONAL_ADDED_MACHINES_FIELDS = [
+ 'ipmi_credentials', 'tag', 'location', 'owner_id'
+]
+ADDED_SWITCH_MACHINES_FIELDS = ['port']
+OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS = ['vlans']
+UPDATED_MACHINES_FIELDS = [
+ 'ipmi_credentials',
+ 'tag', 'location'
+]
+UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans', 'owner_id']
+PATCHED_MACHINES_FIELDS = [
+ 'patched_ipmi_credentials',
+ 'patched_tag', 'patched_location'
+]
+PATCHED_SWITCH_MACHINES_FIELDS = ['patched_vlans']
+RESP_FIELDS = [
+ 'id', 'ip', 'credentials', 'vendor', 'state', 'err_msg',
+ 'filters', 'created_at', 'updated_at'
+]
+RESP_FILTERS_FIELDS = [
+ 'id', 'ip', 'filters', 'created_at', 'updated_at'
+]
+RESP_ACTION_FIELDS = [
+ 'status', 'details'
+]
+RESP_MACHINES_FIELDS = [
+ 'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
+ 'port', 'vlans', 'mac', 'owner_id',
+ 'ipmi_credentials', 'tag', 'location',
+ 'created_at', 'updated_at'
+]
+RESP_MACHINES_HOSTS_FIELDS = [
+ 'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
+ 'port', 'vlans', 'mac',
+ 'ipmi_credentials', 'tag', 'location', 'ip',
+ 'name', 'hostname', 'os_name', 'owner',
+ 'os_installer', 'reinstall_os', 'os_installed',
+ 'clusters', 'created_at', 'updated_at'
+]
+RESP_CLUSTER_FIELDS = [
+ 'name', 'id'
+]
+
+
+def _check_machine_filters(machine_filters):
+ """Check if machine filters format is acceptable."""
+ logging.debug('check machine filters: %s', machine_filters)
+ models.Switch.parse_filters(machine_filters)
+
+
+def _check_vlans(vlans):
+ """Check vlans format is acceptable."""
+ for vlan in vlans:
+ if not isinstance(vlan, int):
+ raise exception.InvalidParameter(
+ 'vlan %s is not int' % vlan
+ )
+
+
+@utils.supported_filters(
+ ADDED_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ ip=utils.check_ip,
+ credentials=utils.check_switch_credentials,
+ machine_filters=_check_machine_filters
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def _add_switch(
+ ip, exception_when_existing=True,
+ machine_filters=setting.SWITCHES_DEFAULT_FILTERS,
+ session=None, **kwargs
+):
+ """Add switch by switch ip."""
+ ip_int = long(netaddr.IPAddress(ip))
+ return utils.add_db_object(
+ session, models.Switch, exception_when_existing, ip_int,
+ machine_filters=machine_filters, **kwargs
+ )
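+ # Sketch: the switch ip is stored as an integer, e.g.
+ #     long(netaddr.IPAddress('192.168.1.1'))  # -> 3232235777L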
+
+
+def get_switch_internal(
+ switch_id, session=None, **kwargs
+):
+ """Get switch by switch id.
+
+ Should only be used by other files under db/api
+ """
+ return _get_switch(switch_id, session=session, **kwargs)
+
+
+def _get_switch(switch_id, session=None, **kwargs):
+ """Get Switch object switch id."""
+ if isinstance(switch_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Switch,
+ id=switch_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'switch id %s type is not int compatible' % switch_id)
+
+
+def _get_switch_by_ip(switch_ip, session=None, **kwargs):
+ """Get switch by switch ip."""
+ switch_ip_int = long(netaddr.IPAddress(switch_ip))
+ return utils.get_db_object(
+ session, models.Switch,
+ ip_int=switch_ip_int, **kwargs
+ )
+
+
+def _get_switch_machine(switch_id, machine_id, session=None, **kwargs):
+ """Get switch machine by switch id and machine id."""
+ switch = _get_switch(switch_id, session=session)
+ from compass.db.api import machine as machine_api
+ machine = machine_api.get_machine_internal(machine_id, session=session)
+ return utils.get_db_object(
+ session, models.SwitchMachine,
+ switch_id=switch.id, machine_id=machine.id, **kwargs
+ )
+
+
+def _get_switchmachine(switch_machine_id, session=None, **kwargs):
+ """Get switch machine by switch_machine_id."""
+ if not isinstance(switch_machine_id, (int, long)):
+ raise exception.InvalidParameter(
+ 'switch machine id %s type is not int compatible' % (
+ switch_machine_id
+ )
+ )
+ return utils.get_db_object(
+ session, models.SwitchMachine,
+ switch_machine_id=switch_machine_id, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCHES
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_switch(
+ switch_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a switch by switch id."""
+ return _get_switch(
+ switch_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCHES
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_switches(user=None, session=None, **filters):
+ """List switches."""
+ # TODO(xicheng): should discuss with weidong.
+ # If we can deprecate the use of DEFAULT_SWITCH_IP,
+ # the code will be simpler.
+ # The UI should use /machines-hosts instead of
+ # /switches-machines-hosts and can show multi switch ip/port
+ # under one row of machine info.
+ switches = utils.list_db_objects(
+ session, models.Switch, **filters
+ )
+ if 'ip_int' in filters:
+ return switches
+ else:
+ return [
+ switch for switch in switches
+ if switch.ip != setting.DEFAULT_SWITCH_IP
+ ]
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_SWITCH
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_switch(switch_id, user=None, session=None, **kwargs):
+ """Delete a switch.
+
+ If switch is not the default switch, and the machine under this switch
+ is only connected to this switch, the machine will be moved to connect
+ to default switch. Otherwise we can only simply delete the switch
+ machine. The purpose here to make sure there is no machine not
+ connecting to any switch.
+ """
+ # TODO(xicheng): Simplify the logic if the default switch feature
+ # can be deprecated.
+ switch = _get_switch(switch_id, session=session)
+ default_switch = _get_switch_by_ip(
+ setting.DEFAULT_SWITCH_IP, session=session
+ )
+ if switch.id != default_switch.id:
+ for switch_machine in switch.switch_machines:
+ machine = switch_machine.machine
+ if len(machine.switch_machines) <= 1:
+ utils.add_db_object(
+ session, models.SwitchMachine,
+ False,
+ default_switch.id, machine.id,
+ port=switch_machine.port
+ )
+ return utils.del_db_object(session, switch)
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH
+)
+def add_switch(
+ exception_when_existing=True, ip=None,
+ user=None, session=None, **kwargs
+):
+ """Create a switch."""
+ return _add_switch(
+ ip,
+ exception_when_existing=exception_when_existing,
+ session=session, **kwargs
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH
+)
+def add_switches(
+ exception_when_existing=False,
+ data=[], user=None, session=None
+):
+ """Create switches."""
+ # TODO(xicheng): simplify the batch api.
+ switches = []
+ fail_switches = []
+ for switch_data in data:
+ switch_object = _get_switch_by_ip(
+ switch_data['ip'], session=session,
+ exception_when_missing=False
+ )
+ if switch_object:
+ logging.error('ip %s exists in switch %s' % (
+ switch_data['ip'], switch_object.id
+ ))
+ fail_switches.append(switch_data)
+ else:
+ switches.append(
+ _add_switch(
+ exception_when_existing=exception_when_existing,
+ session=session,
+ **switch_data
+ )
+ )
+ return {
+ 'switches': switches,
+ 'fail_switches': fail_switches
+ }
+
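+# Illustrative batch payload for add_switches (a sketch, not part of the
+# module; the ip values are hypothetical):
+#
+#     add_switches(
+#         data=[
+#             {'ip': '10.145.8.10'},
+#             {'ip': '10.145.8.11'},
+#         ],
+#         user=admin_user,
+#     )
+#     # -> {'switches': [...], 'fail_switches': [...]}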
+
+@utils.wrap_to_dict(RESP_FIELDS)
+def _update_switch(switch_id, session=None, **kwargs):
+ """Update a switch."""
+ switch = _get_switch(switch_id, session=session)
+ return utils.update_db_object(session, switch, **kwargs)
+
+
+# replace machine_filters in kwargs with put_machine_filters,
+# which tells the db this is a put action on the field.
+@utils.replace_filters(
+ machine_filters='put_machine_filters'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ credentials=utils.check_switch_credentials,
+ put_machine_filters=_check_machine_filters
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH
+)
+def update_switch(switch_id, user=None, session=None, **kwargs):
+ """Update fields of a switch."""
+ return _update_switch(switch_id, session=session, **kwargs)
+
+
+# replace credentials with patched_credentials and
+# machine_filters with patched_machine_filters in kwargs.
+# This tells the db these are patch actions on the above fields.
+@utils.replace_filters(
+ credentials='patched_credentials',
+ machine_filters='patched_machine_filters'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ patched_machine_filters=_check_machine_filters
+)
+@database.run_in_session()
+@utils.output_validates(
+ credentials=utils.check_switch_credentials
+)
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH
+)
+def patch_switch(switch_id, user=None, session=None, **kwargs):
+ """Patch fields of a switch."""
+ return _update_switch(switch_id, session=session, **kwargs)
+
+
+@util.deprecated
+@utils.supported_filters(optional_support_keys=SUPPORTED_FILTER_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_FILTERS
+)
+@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
+def list_switch_filters(user=None, session=None, **filters):
+ """List all switches' filters."""
+ return utils.list_db_objects(
+ session, models.Switch, **filters
+ )
+
+
+@util.deprecated
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_FILTERS
+)
+@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
+def get_switch_filters(
+ switch_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get filters of a switch."""
+ return _get_switch(
+ switch_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@util.deprecated
+@utils.replace_filters(
+ machine_filters='put_machine_filters'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FILTERS_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(put_machine_filters=_check_machine_filters)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_SWITCH_FILTERS
+)
+@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
+def update_switch_filters(switch_id, user=None, session=None, **kwargs):
+ """Update filters of a switch."""
+ switch = _get_switch(switch_id, session=session)
+ return utils.update_db_object(session, switch, **kwargs)
+
+
+@util.deprecated
+@utils.replace_filters(
+ machine_filters='patched_machine_filters'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_FILTERS_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(patched_machine_filters=_check_machine_filters)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_SWITCH_FILTERS
+)
+@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
+def patch_switch_filter(switch_id, user=None, session=None, **kwargs):
+ """Patch filters to a switch."""
+ switch = _get_switch(switch_id, session=session)
+ return utils.update_db_object(session, switch, **kwargs)
+
+
+@util.deprecated
+def get_switch_machines_internal(session, **filters):
+ return utils.list_db_objects(
+ session, models.SwitchMachine, **filters
+ )
+
+
+def _filter_port(port_filter, obj):
+ """filter switch machines by port.
+
+ supported port_filter keys: [
+ 'startswith', 'endswith', 'resp_lt',
+ 'resp_le', 'resp_gt', 'resp_ge', 'resp_range'
+ ]
+
+ port_filter examples:
+ {
+        'startswith': 'ae', 'endswith': '',
+ 'resp_ge': 20, 'resp_le': 30,
+ }
+ """
+ port_prefix = port_filter.get('startswith', '')
+ port_suffix = port_filter.get('endswith', '')
+ pattern = re.compile(r'%s(\d+)%s' % (port_prefix, port_suffix))
+ match = pattern.match(obj)
+ if not match:
+ return False
+ port_number = int(match.group(1))
+ if (
+ 'resp_lt' in port_filter and
+ port_number >= port_filter['resp_lt']
+ ):
+ return False
+ if (
+ 'resp_le' in port_filter and
+ port_number > port_filter['resp_le']
+ ):
+ return False
+ if (
+ 'resp_gt' in port_filter and
+ port_number <= port_filter['resp_gt']
+ ):
+ return False
+ if (
+ 'resp_ge' in port_filter and
+ port_number < port_filter['resp_ge']
+ ):
+ return False
+ if 'resp_range' in port_filter:
+ resp_range = port_filter['resp_range']
+ if not isinstance(resp_range, list):
+ resp_range = [resp_range]
+ in_range = False
+ for port_start, port_end in resp_range:
+ if port_start <= port_number <= port_end:
+ in_range = True
+ break
+ if not in_range:
+ return False
+ return True
+
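+# Usage sketch (illustrative only), following the semantics above:
+#
+#     _filter_port({'startswith': 'ae', 'resp_ge': 20, 'resp_le': 30}, 'ae25')
+#     # -> True: 'ae25' matches prefix 'ae' and 25 falls within [20, 30].
+#     _filter_port({'startswith': 'ae', 'resp_range': [(1, 10)]}, 'ae25')
+#     # -> False: 25 falls outside every (start, end) pair.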
+
+def _filter_vlans(vlan_filter, obj):
+ """Filter switch machines by vlan.
+
+ supported keys in vlan_filter:
+ ['resp_in']
+ """
+ vlans = set(obj)
+ if 'resp_in' in vlan_filter:
+ resp_vlans = set(vlan_filter['resp_in'])
+ if not (vlans & resp_vlans):
+ return False
+ return True
+
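+# Usage sketch (illustrative only):
+#
+#     _filter_vlans({'resp_in': [88, 100]}, [1, 88])
+#     # -> True: the machine's vlans intersect the requested vlans.
+#     _filter_vlans({'resp_in': [88, 100]}, [1, 2])
+#     # -> False: no overlap.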
+
+@utils.output_filters(
+ port=_filter_port, vlans=_filter_vlans,
+ tag=utils.general_filter_callback,
+ location=utils.general_filter_callback
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def _filter_switch_machines(switch_machines):
+ """Get filtered switch machines.
+
+ The filters are defined in each switch.
+ """
+ return [
+ switch_machine for switch_machine in switch_machines
+ if not switch_machine.filtered
+ ]
+
+
+@utils.output_filters(
+ missing_ok=True,
+ port=_filter_port, vlans=_filter_vlans,
+ tag=utils.general_filter_callback,
+ location=utils.general_filter_callback,
+ os_name=utils.general_filter_callback,
+)
+@utils.wrap_to_dict(
+ RESP_MACHINES_HOSTS_FIELDS,
+ clusters=RESP_CLUSTER_FIELDS
+)
+def _filter_switch_machines_hosts(switch_machines):
+    """Similar to _filter_switch_machines, but also returns host info."""
+ filtered_switch_machines = [
+ switch_machine for switch_machine in switch_machines
+ if not switch_machine.filtered
+ ]
+ switch_machines_hosts = []
+ for switch_machine in filtered_switch_machines:
+ machine = switch_machine.machine
+ host = machine.host
+ if host:
+ switch_machine_host_dict = host.to_dict()
+ else:
+ switch_machine_host_dict = machine.to_dict()
+ switch_machine_host_dict.update(
+ switch_machine.to_dict()
+ )
+ switch_machines_hosts.append(switch_machine_host_dict)
+ return switch_machines_hosts
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_MACHINES_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+def list_switch_machines(
+ switch_id, user=None, session=None, **filters
+):
+ """Get switch machines of a switch."""
+ switch = _get_switch(switch_id, session=session)
+ switch_machines = utils.list_db_objects(
+ session, models.SwitchMachine, switch_id=switch.id, **filters
+ )
+    if not user.is_admin and switch_machines:
+        switch_machines = [
+            switch_machine for switch_machine in switch_machines
+            if switch_machine.machine.owner_id == user.id
+        ]
+ return _filter_switch_machines(switch_machines)
+
+
+# replace ip_int with switch_ip_int in kwargs
+@utils.replace_filters(
+ ip_int='switch_ip_int'
+)
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_SWITCH_MACHINES_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+def list_switchmachines(user=None, session=None, **filters):
+ """List switch machines."""
+ switch_machines = utils.list_db_objects(
+ session, models.SwitchMachine, **filters
+ )
+ return _filter_switch_machines(
+ switch_machines
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_MACHINES_HOSTS_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+def list_switch_machines_hosts(
+ switch_id, user=None, session=None, **filters
+):
+ """Get switch machines and possible hosts of a switch."""
+ switch = _get_switch(switch_id, session=session)
+ switch_machines = utils.list_db_objects(
+ session, models.SwitchMachine, switch_id=switch.id, **filters
+ )
+ return _filter_switch_machines_hosts(
+ switch_machines
+ )
+
+
+# replace ip_int with switch_ip_int in kwargs
+@utils.replace_filters(
+ ip_int='switch_ip_int'
+)
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+def list_switchmachines_hosts(user=None, session=None, **filters):
+    """List switch machines and possible hosts."""
+ switch_machines = utils.list_db_objects(
+ session, models.SwitchMachine, **filters
+ )
+    if not user.is_admin and switch_machines:
+        switch_machines = [
+            switch_machine for switch_machine in switch_machines
+            if switch_machine.machine.owner_id == user.id
+        ]
+ return _filter_switch_machines_hosts(
+ switch_machines
+ )
+
+
+@utils.supported_filters(
+ ADDED_MACHINES_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS,
+ ignore_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS
+)
+@utils.input_validates(mac=utils.check_mac)
+def _add_machine_if_not_exist(mac=None, session=None, **kwargs):
+    """Add a machine if no existing machine has the given mac."""
+ return utils.add_db_object(
+ session, models.Machine, False,
+ mac, **kwargs)
+
+
+@utils.supported_filters(
+ ADDED_SWITCH_MACHINES_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS,
+ ignore_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS
+)
+@utils.input_validates(vlans=_check_vlans)
+def _add_switch_machine_only(
+ switch, machine, exception_when_existing=True,
+ session=None, owner_id=None, port=None, **kwargs
+):
+ """add a switch machine."""
+ return utils.add_db_object(
+ session, models.SwitchMachine,
+ exception_when_existing,
+ switch.id, machine.id, port=port,
+ owner_id=owner_id,
+ **kwargs
+ )
+
+
+@utils.supported_filters(
+ ADDED_MACHINES_FIELDS + ADDED_SWITCH_MACHINES_FIELDS,
+ optional_support_keys=(
+ OPTIONAL_ADDED_MACHINES_FIELDS +
+ OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def _add_switch_machine(
+ switch_id, exception_when_existing=True,
+ mac=None, port=None, session=None, owner_id=None, **kwargs
+):
+ """Add switch machine.
+
+    If the underlying machine does not exist, the underlying machine
+    is created as well.
+ """
+ switch = _get_switch(switch_id, session=session)
+ machine = _add_machine_if_not_exist(
+ mac=mac, session=session, owner_id=owner_id, **kwargs
+ )
+ return _add_switch_machine_only(
+ switch, machine,
+ exception_when_existing,
+ port=port, session=session, **kwargs
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+def add_switch_machine(
+ switch_id, exception_when_existing=True,
+ mac=None, user=None, session=None,
+ owner_id=None, **kwargs
+):
+ """Add switch machine to a switch."""
+ return _add_switch_machine(
+ switch_id,
+ exception_when_existing=exception_when_existing,
+ mac=mac, session=session, owner_id=owner_id, **kwargs
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(
+ [
+ 'switches_machines',
+ 'duplicate_switches_machines',
+ 'fail_switches_machines'
+ ],
+ switches_machines=RESP_MACHINES_FIELDS,
+ duplicate_switches_machines=RESP_MACHINES_FIELDS
+)
+def add_switch_machines(
+ exception_when_existing=False,
+ data=[], user=None, session=None, owner_id=None
+):
+ """Add switch machines."""
+ switch_machines = []
+ duplicate_switch_machines = []
+ failed_switch_machines = []
+ switches_mapping = {}
+ switch_machines_mapping = {}
+ switch_ips = []
+ for item_data in data:
+ switch_ip = item_data['switch_ip']
+ if switch_ip not in switches_mapping:
+ switch_object = _get_switch_by_ip(
+ switch_ip, session=session,
+ exception_when_missing=False
+ )
+ if switch_object:
+ switch_ips.append(switch_ip)
+ switches_mapping[switch_ip] = switch_object
+ else:
+ logging.error(
+ 'switch %s does not exist' % switch_ip
+ )
+ item_data.pop('switch_ip')
+ failed_switch_machines.append(item_data)
+ else:
+ switch_object = switches_mapping[switch_ip]
+ if switch_object:
+ item_data.pop('switch_ip')
+ switch_machines_mapping.setdefault(
+ switch_object.id, []
+ ).append(item_data)
+
+ for switch_ip in switch_ips:
+ switch_object = switches_mapping[switch_ip]
+ switch_id = switch_object.id
+ machines = switch_machines_mapping[switch_id]
+ for machine in machines:
+ mac = machine['mac']
+ machine_object = _add_machine_if_not_exist(
+ mac=mac, session=session
+ )
+ switch_machine_object = _get_switch_machine(
+ switch_id, machine_object.id, session=session,
+ exception_when_missing=False
+ )
+ if switch_machine_object:
+ port = machine['port']
+ switch_machine_id = switch_machine_object.switch_machine_id
+ exist_port = switch_machine_object.port
+ if exist_port != port:
+ logging.error(
+                        'switch machine %s existing port %s is '
+ 'different from added port %s' % (
+ switch_machine_id,
+ exist_port, port
+ )
+ )
+ failed_switch_machines.append(machine)
+ else:
+ logging.error(
+                        'switch machine %s is a duplicate, '
+                        'it will not be overridden' % switch_machine_id
+ )
+ duplicate_switch_machines.append(machine)
+ else:
+ del machine['mac']
+ switch_machines.append(_add_switch_machine_only(
+ switch_object, machine_object,
+ exception_when_existing,
+ session=session, owner_id=owner_id, **machine
+ ))
+ return {
+ 'switches_machines': switch_machines,
+ 'duplicate_switches_machines': duplicate_switch_machines,
+ 'fail_switches_machines': failed_switch_machines
+ }
+
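+# Illustrative batch payload for add_switch_machines (a sketch; the
+# addresses are hypothetical). Each item names the switch by ip and the
+# machine by mac plus the port it was discovered on:
+#
+#     add_switch_machines(
+#         data=[
+#             {'switch_ip': '10.145.8.10',
+#              'mac': '00:0c:29:a7:ea:4b', 'port': 'ae20'},
+#         ],
+#         user=admin_user,
+#     )
+#     # -> {'switches_machines': [...],
+#     #     'duplicate_switches_machines': [...],
+#     #     'fail_switches_machines': [...]}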
+
+@utils.supported_filters(optional_support_keys=['find_machines'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_SWITCH_MACHINES
+)
+@utils.wrap_to_dict(RESP_ACTION_FIELDS)
+def poll_switch(switch_id, user=None, session=None, **kwargs):
+ """poll switch to get machines."""
+ from compass.tasks import client as celery_client
+ switch = _get_switch(switch_id, session=session)
+ celery_client.celery.send_task(
+ 'compass.tasks.pollswitch',
+ (user.email, switch.ip, switch.credentials),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'action %s sent' % kwargs,
+ 'details': {
+ }
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def get_switch_machine(
+ switch_id, machine_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a switch machine by switch id and machine id."""
+ return _get_switch_machine(
+ switch_id, machine_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def get_switchmachine(
+ switch_machine_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a switch machine by switch_machine_id."""
+ return _get_switchmachine(
+ switch_machine_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS
+ ),
+ ignore_support_keys=(
+ UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
+ )
+)
+def _update_machine_if_necessary(
+ machine, session=None, **kwargs
+):
+    """Update the machine if there is something to update."""
+ utils.update_db_object(
+ session, machine, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=(
+ UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS
+ )
+)
+def _update_switch_machine_only(switch_machine, session=None, **kwargs):
+ """Update switch machine."""
+ return utils.update_db_object(
+ session, switch_machine, **kwargs
+ )
+
+
+def _update_switch_machine(
+ switch_machine, session=None, **kwargs
+):
+    """Update switch machine.
+
+    If some attributes of the underlying machine need updating,
+    update them on the underlying machine as well.
+    """
+ _update_machine_if_necessary(
+ switch_machine.machine, session=session, **kwargs
+ )
+ return _update_switch_machine_only(
+ switch_machine, session=session, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(vlans=_check_vlans)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def update_switch_machine(
+ switch_id, machine_id, user=None,
+ session=None, **kwargs
+):
+ """Update switch machine by switch id and machine id."""
+ switch_machine = _get_switch_machine(
+ switch_id, machine_id, session=session
+ )
+ return _update_switch_machine(
+ switch_machine,
+ session=session, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(vlans=_check_vlans)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def update_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
+ """Update switch machine by switch_machine_id."""
+ switch_machine = _get_switchmachine(
+ switch_machine_id, session=session
+ )
+ return _update_switch_machine(
+ switch_machine,
+ session=session, **kwargs
+ )
+
+
+# replace [vlans, ipmi_credentials, tag, location] with
+# [patched_vlans, patched_ipmi_credentials, patched_tag,
+# patched_location] in kwargs. This tells the db these fields
+# will be patched.
+@utils.replace_filters(
+ vlans='patched_vlans',
+ ipmi_credentials='patched_ipmi_credentials',
+ tag='patched_tag',
+ location='patched_location'
+)
+@utils.supported_filters(
+ optional_support_keys=(
+ PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(patched_vlans=_check_vlans)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def patch_switch_machine(
+ switch_id, machine_id, user=None,
+ session=None, **kwargs
+):
+ """Patch switch machine by switch_id and machine_id."""
+ switch_machine = _get_switch_machine(
+ switch_id, machine_id, session=session
+ )
+ return _update_switch_machine(
+ switch_machine,
+ session=session, **kwargs
+ )
+
+
+# replace [vlans, ipmi_credentials, tag, location] with
+# [patched_vlans, patched_ipmi_credentials, patched_tag,
+# patched_location] in kwargs. This tells the db these fields
+# will be patched.
+@utils.replace_filters(
+ vlans='patched_vlans',
+ ipmi_credentials='patched_ipmi_credentials',
+ tag='patched_tag',
+ location='patched_location'
+)
+@utils.supported_filters(
+ optional_support_keys=(
+ PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(patched_vlans=_check_vlans)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def patch_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
+ """Patch switch machine by switch_machine_id."""
+ switch_machine = _get_switchmachine(
+ switch_machine_id, session=session
+ )
+ return _update_switch_machine(
+ switch_machine,
+ session=session, **kwargs
+ )
+
+
+def _del_switch_machine(
+ switch_machine, session=None
+):
+ """Delete switch machine.
+
+    If this is the last switch machine associated with the underlying
+    machine, add a switch machine record to the default switch so the
+    machine remains searchable.
+ """
+ default_switch = _get_switch_by_ip(
+ setting.DEFAULT_SWITCH_IP, session=session
+ )
+ machine = switch_machine.machine
+ if len(machine.switch_machines) <= 1:
+ utils.add_db_object(
+ session, models.SwitchMachine,
+ False,
+ default_switch.id, machine.id,
+ port=switch_machine.port
+ )
+ return utils.del_db_object(session, switch_machine)
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def del_switch_machine(
+ switch_id, machine_id, user=None,
+ session=None, **kwargs
+):
+ """Delete switch machine by switch id and machine id."""
+ switch_machine = _get_switch_machine(
+ switch_id, machine_id, session=session
+ )
+ return _del_switch_machine(switch_machine, session=session)
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def del_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
+ """Delete switch machine by switch_machine_id."""
+ switch_machine = _get_switchmachine(
+ switch_machine_id, session=session
+ )
+ return _del_switch_machine(switch_machine, session=session)
+
+
+@utils.supported_filters(
+ ['machine_id'],
+ optional_support_keys=UPDATED_SWITCH_MACHINES_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+def _add_machine_to_switch(
+ switch_id, machine_id, session=None, **kwargs
+):
+ """Add machine to switch."""
+ switch = _get_switch(switch_id, session=session)
+ from compass.db.api import machine as machine_api
+ machine = machine_api.get_machine_internal(
+ machine_id, session=session
+ )
+ _add_switch_machine_only(
+ switch, machine, False,
+ owner_id=machine.owner_id, **kwargs
+ )
+
+
+def _add_machines(switch, machines, session=None):
+ """Add machines to switch.
+
+ Args:
+ machines: list of dict which contains attributes to
+ add machine to switch.
+
+ machines example:
+        [{'machine_id': 1, 'port': 'ae20'}]
+ """
+ for machine in machines:
+ _add_machine_to_switch(
+ switch.id, session=session, **machine
+ )
+
+
+def _remove_machines(switch, machines, session=None):
+ """Remove machines from switch.
+
+ Args:
+ machines: list of machine id.
+
+ machines example:
+ [1,2]
+ """
+ utils.del_db_objects(
+ session, models.SwitchMachine,
+ switch_id=switch.id, machine_id=machines
+ )
+
+
+def _set_machines(switch, machines, session=None):
+ """Reset machines to a switch.
+
+ Args:
+ machines: list of dict which contains attributes to
+ add machine to switch.
+
+ machines example:
+        [{'machine_id': 1, 'port': 'ae20'}]
+ """
+ utils.del_db_objects(
+ session, models.SwitchMachine,
+ switch_id=switch.id
+ )
+ for switch_machine in machines:
+ _add_machine_to_switch(
+ switch.id, session=session, **switch_machine
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=[
+ 'add_machines', 'remove_machines', 'set_machines'
+ ]
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_SWITCH_MACHINES
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def update_switch_machines(
+ switch_id, add_machines=[], remove_machines=[],
+ set_machines=None, user=None, session=None, **kwargs
+):
+ """update switch's machines"""
+ switch = _get_switch(switch_id, session=session)
+ if remove_machines:
+ _remove_machines(
+ switch, remove_machines, session=session
+ )
+ if add_machines:
+ _add_machines(
+ switch, add_machines, session=session
+ )
+ if set_machines is not None:
+ _set_machines(
+ switch, set_machines, session=session
+ )
+ return switch.switch_machines
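+
+
+# Usage sketch (illustrative; ids and ports are hypothetical):
+#
+#     update_switch_machines(
+#         1,
+#         add_machines=[{'machine_id': 2, 'port': 'ae21'}],
+#         remove_machines=[3],
+#         user=admin_user,
+#     )
+#
+# set_machines replaces the whole association list and takes the same
+# item shape as add_machines.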
diff --git a/compass-deck/db/api/user.py b/compass-deck/db/api/user.py
new file mode 100644
index 0000000..db039eb
--- /dev/null
+++ b/compass-deck/db/api/user.py
@@ -0,0 +1,553 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User database operations."""
+import datetime
+import functools
+import logging
+import re
+
+from flask.ext.login import UserMixin
+
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = ['email', 'is_admin', 'active']
+PERMISSION_SUPPORTED_FIELDS = ['name']
+SELF_UPDATED_FIELDS = ['email', 'firstname', 'lastname', 'password']
+ADMIN_UPDATED_FIELDS = ['is_admin', 'active']
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+UPDATED_FIELDS = [
+ 'email', 'firstname', 'lastname', 'password', 'is_admin', 'active'
+]
+ADDED_FIELDS = ['email', 'password']
+OPTIONAL_ADDED_FIELDS = ['is_admin', 'active']
+PERMISSION_ADDED_FIELDS = ['permission_id']
+RESP_FIELDS = [
+ 'id', 'email', 'is_admin', 'active', 'firstname',
+ 'lastname', 'created_at', 'updated_at'
+]
+RESP_TOKEN_FIELDS = [
+ 'id', 'user_id', 'token', 'expire_timestamp'
+]
+PERMISSION_RESP_FIELDS = [
+ 'id', 'user_id', 'permission_id', 'name', 'alias', 'description',
+ 'created_at', 'updated_at'
+]
+
+
+def _check_email(email):
+ """Check email is email format."""
+ if '@' not in email:
+ raise exception.InvalidParameter(
+ 'there is no @ in email address %s.' % email
+ )
+
+
+def _check_user_permission(user, permission, session=None):
+ """Check user has permission."""
+ if not user:
+        logging.info('an empty user means the call is internal')
+ return
+ if user.is_admin:
+ return
+
+ user_permission = utils.get_db_object(
+ session, models.UserPermission,
+ False, user_id=user.id, name=permission.name
+ )
+ if not user_permission:
+ raise exception.Forbidden(
+ 'user %s does not have permission %s' % (
+ user.email, permission.name
+ )
+ )
+
+
+def check_user_permission(permission):
+ """Decorator to check user having permission."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ user = kwargs.get('user')
+ if user is not None:
+ session = kwargs.get('session')
+ if session is None:
+ raise exception.DatabaseException(
+                        'wrapper check_user_permission is not called in session'
+ )
+ _check_user_permission(user, permission, session=session)
+ return func(*args, **kwargs)
+ else:
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
+
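+# Usage sketch: the decorator wraps db api functions that receive user
+# and session through kwargs, mirroring the switch api earlier in this
+# diff, e.g.:
+#
+#     @database.run_in_session()
+#     @check_user_permission(permission.PERMISSION_LIST_SWITCHES)
+#     def list_switches(user=None, session=None, **filters):
+#         ...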
+
+def check_user_admin():
+ """Decorator to check user is admin."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ user = kwargs.get('user')
+ if user is not None:
+ if not user.is_admin:
+ raise exception.Forbidden(
+ 'User %s is not admin.' % (
+ user.email
+ )
+ )
+ return func(*args, **kwargs)
+ else:
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
+
+
+def check_user_admin_or_owner():
+ """Decorator to check user is admin or the owner of the resource."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(user_id, *args, **kwargs):
+ user = kwargs.get('user')
+ if user is not None:
+ session = kwargs.get('session')
+ if session is None:
+ raise exception.DatabaseException(
+ 'wrapper check_user_admin_or_owner is '
+ 'not called in session'
+ )
+ check_user = _get_user(user_id, session=session)
+ if not user.is_admin and user.id != check_user.id:
+ raise exception.Forbidden(
+ 'User %s is not admin or the owner of user %s.' % (
+ user.email, check_user.email
+ )
+ )
+
+ return func(
+ user_id, *args, **kwargs
+ )
+ else:
+ return func(
+ user_id, *args, **kwargs
+ )
+ return wrapper
+ return decorator
+
+
+def _add_user_permissions(user, session=None, **permission_filters):
+ """add permissions to a user."""
+ from compass.db.api import permission as permission_api
+ for api_permission in permission_api.list_permissions(
+ session=session, **permission_filters
+ ):
+ utils.add_db_object(
+ session, models.UserPermission, False,
+ user.id, api_permission['id']
+ )
+
+
+def _remove_user_permissions(user, session=None, **permission_filters):
+ """remove permissions from a user."""
+ from compass.db.api import permission as permission_api
+ permission_ids = [
+ api_permission['id']
+ for api_permission in permission_api.list_permissions(
+ session=session, **permission_filters
+ )
+ ]
+ utils.del_db_objects(
+ session, models.UserPermission,
+ user_id=user.id, permission_id=permission_ids
+ )
+
+
+def _set_user_permissions(user, session=None, **permission_filters):
+ """set permissions to a user."""
+ utils.del_db_objects(
+ session, models.UserPermission,
+ user_id=user.id
+ )
+    _add_user_permissions(user, session=session, **permission_filters)
+
+
+class UserWrapper(UserMixin):
+ """Wrapper class provided to flask."""
+
+ def __init__(
+ self, id, email, crypted_password,
+ active=True, is_admin=False,
+ expire_timestamp=None, token='', **kwargs
+ ):
+ self.id = id
+ self.email = email
+ self.password = crypted_password
+ self.active = active
+ self.is_admin = is_admin
+ self.expire_timestamp = expire_timestamp
+ if not token:
+ self.token = self.get_auth_token()
+ else:
+ self.token = token
+ super(UserWrapper, self).__init__()
+
+ def authenticate(self, password):
+ if not util.encrypt(password, self.password) == self.password:
+ raise exception.Unauthorized('%s password mismatch' % self.email)
+
+ def get_auth_token(self):
+ return util.encrypt(self.email)
+
+ def is_active(self):
+ return self.active
+
+ def get_id(self):
+ return self.token
+
+ def is_authenticated(self):
+ current_time = datetime.datetime.now()
+ return (
+ not self.expire_timestamp or
+ current_time < self.expire_timestamp
+ )
+
+ def __str__(self):
+ return '%s[email:%s,password:%s]' % (
+ self.__class__.__name__, self.email, self.password)
+
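+# Minimal usage sketch (the email and password are hypothetical; the
+# wrapper is normally built via get_user_object below):
+#
+#     user = get_user_object('admin@huawei.com')
+#     user.authenticate('admin')   # raises exception.Unauthorized on mismatch
+#     token = user.get_auth_token()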
+
+@database.run_in_session()
+def get_user_object(email, session=None, **kwargs):
+ """get user and convert to UserWrapper object."""
+ user = utils.get_db_object(
+ session, models.User, False, email=email
+ )
+ if not user:
+ raise exception.Unauthorized(
+ '%s unauthorized' % email
+ )
+ user_dict = user.to_dict()
+ user_dict.update(kwargs)
+ return UserWrapper(**user_dict)
+
+
+@database.run_in_session(exception_when_in_session=False)
+def get_user_object_from_token(token, session=None):
+ """Get user from token and convert to UserWrapper object.
+
+    .. note::
+ get_user_object_from_token may be called in session.
+ """
+ expire_timestamp = {
+ 'ge': datetime.datetime.now()
+ }
+ user_token = utils.get_db_object(
+ session, models.UserToken, False,
+ token=token, expire_timestamp=expire_timestamp
+ )
+ if not user_token:
+ raise exception.Unauthorized(
+ 'invalid user token: %s' % token
+ )
+ user_dict = _get_user(
+ user_token.user_id, session=session
+ ).to_dict()
+ user_dict['token'] = token
+ expire_timestamp = user_token.expire_timestamp
+ user_dict['expire_timestamp'] = expire_timestamp
+ return UserWrapper(**user_dict)
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
+def record_user_token(
+ token, expire_timestamp, user=None, session=None
+):
+ """record user token in database."""
+ user_token = utils.get_db_object(
+ session, models.UserToken, False,
+ user_id=user.id, token=token
+ )
+ if not user_token:
+ return utils.add_db_object(
+ session, models.UserToken, True,
+ token, user_id=user.id,
+ expire_timestamp=expire_timestamp
+ )
+ elif expire_timestamp > user_token.expire_timestamp:
+ return utils.update_db_object(
+ session, user_token, expire_timestamp=expire_timestamp
+ )
+ return user_token
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
+def clean_user_token(token, user=None, session=None):
+ """clean user token in database."""
+ return utils.del_db_objects(
+ session, models.UserToken,
+ token=token, user_id=user.id
+ )
+
+
+def _get_user(user_id, session=None, **kwargs):
+ """Get user object by user id."""
+ if isinstance(user_id, (int, long)):
+ return utils.get_db_object(
+ session, models.User, id=user_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'user id %s type is not int compatible' % user_id
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@check_user_admin_or_owner()
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_user(
+ user_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a user."""
+ return _get_user(
+ user_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_current_user(
+ exception_when_missing=True, user=None,
+ session=None, **kwargs
+):
+ """get current user."""
+ return _get_user(
+ user.id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_FIELDS
+)
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_users(user=None, session=None, **filters):
+ """List all users."""
+ return utils.list_db_objects(
+ session, models.User, **filters
+ )
+
+
+@utils.input_validates(email=_check_email)
+@utils.supported_filters(
+ ADDED_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_user(
+ exception_when_existing=True, user=None,
+ session=None, email=None, **kwargs
+):
+ """Create a user and return created user object."""
+ add_user = utils.add_db_object(
+ session, models.User,
+ exception_when_existing, email,
+ **kwargs)
+ _add_user_permissions(
+ add_user,
+ session=session,
+ name=setting.COMPASS_DEFAULT_PERMISSIONS
+ )
+ return add_user
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_user(user_id, user=None, session=None, **kwargs):
+ """delete a user and return the deleted user object."""
+ del_user = _get_user(user_id, session=session)
+ return utils.del_db_object(session, del_user)
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(email=_check_email)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_user(user_id, user=None, session=None, **kwargs):
+ """Update a user and return the updated user object."""
+ update_user = _get_user(
+ user_id, session=session,
+ )
+ allowed_fields = set()
+ if user.is_admin:
+ allowed_fields |= set(ADMIN_UPDATED_FIELDS)
+ if user.id == update_user.id:
+ allowed_fields |= set(SELF_UPDATED_FIELDS)
+ unsupported_fields = set(kwargs) - allowed_fields
+ if unsupported_fields:
+        # The user is not allowed to update these fields.
+        raise exception.Forbidden(
+            'User %s has no permission to update user %s fields %s.' % (
+                user.email, update_user.email, unsupported_fields
+ )
+ )
+ return utils.update_db_object(session, update_user, **kwargs)
+
+
+@utils.supported_filters(optional_support_keys=PERMISSION_SUPPORTED_FIELDS)
+@database.run_in_session()
+@check_user_admin_or_owner()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def get_permissions(
+ user_id, user=None, exception_when_missing=True,
+ session=None, **kwargs
+):
+ """List permissions of a user."""
+ get_user = _get_user(
+ user_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+ return utils.list_db_objects(
+ session, models.UserPermission, user_id=get_user.id, **kwargs
+ )
+
+
+def _get_permission(user_id, permission_id, session=None, **kwargs):
+ """Get user permission by user id and permission id."""
+ user = _get_user(user_id, session=session)
+ from compass.db.api import permission as permission_api
+ permission = permission_api.get_permission_internal(
+ permission_id, session=session
+ )
+ return utils.get_db_object(
+ session, models.UserPermission,
+ user_id=user.id, permission_id=permission.id,
+ **kwargs
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@check_user_admin_or_owner()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def get_permission(
+ user_id, permission_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get a permission of a user."""
+ return _get_permission(
+ user_id, permission_id,
+ exception_when_missing=exception_when_missing,
+ session=session,
+ **kwargs
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@check_user_admin_or_owner()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def del_permission(user_id, permission_id, user=None, session=None, **kwargs):
+ """Delete a permission from a user."""
+ user_permission = _get_permission(
+ user_id, permission_id,
+ session=session, **kwargs
+ )
+ return utils.del_db_object(session, user_permission)
+
+
+@utils.supported_filters(
+ PERMISSION_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def add_permission(
+ user_id, permission_id=None, exception_when_existing=True,
+ user=None, session=None
+):
+ """Add a permission to a user."""
+ get_user = _get_user(user_id, session=session)
+ from compass.db.api import permission as permission_api
+ get_permission = permission_api.get_permission_internal(
+ permission_id, session=session
+ )
+ return utils.add_db_object(
+ session, models.UserPermission, exception_when_existing,
+ get_user.id, get_permission.id
+ )
+
+
+def _get_permission_filters(permission_ids):
+ """Helper function to filter permissions."""
+ if permission_ids == 'all':
+ return {}
+ else:
+ return {'id': permission_ids}
+
+
+@utils.supported_filters(
+ optional_support_keys=[
+ 'add_permissions', 'remove_permissions', 'set_permissions'
+ ]
+)
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def update_permissions(
+ user_id, add_permissions=[], remove_permissions=[],
+ set_permissions=None, user=None, session=None, **kwargs
+):
+ """update user permissions."""
+ update_user = _get_user(user_id, session=session)
+ if remove_permissions:
+ _remove_user_permissions(
+ update_user, session=session,
+ **_get_permission_filters(remove_permissions)
+ )
+ if add_permissions:
+ _add_user_permissions(
+ update_user, session=session,
+ **_get_permission_filters(add_permissions)
+ )
+ if set_permissions is not None:
+ _set_user_permissions(
+ update_user, session=session,
+ **_get_permission_filters(set_permissions)
+ )
+ return update_user.user_permissions
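+
+
+# Usage sketch (ids illustrative): the permission filters may be lists
+# of permission ids or the literal 'all':
+#
+#     update_permissions(user_id, add_permissions=[1, 2], user=admin_user)
+#     update_permissions(user_id, set_permissions='all', user=admin_user)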
diff --git a/compass-deck/db/api/user_log.py b/compass-deck/db/api/user_log.py
new file mode 100644
index 0000000..70de9db
--- /dev/null
+++ b/compass-deck/db/api/user_log.py
@@ -0,0 +1,82 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""UserLog database operations."""
+import logging
+
+from compass.db.api import database
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+
+SUPPORTED_FIELDS = ['user_email', 'timestamp']
+USER_SUPPORTED_FIELDS = ['timestamp']
+RESP_FIELDS = ['user_id', 'action', 'timestamp']
+
+
+@database.run_in_session()
+def log_user_action(user_id, action, session=None):
+ """Log user action."""
+ utils.add_db_object(
+ session, models.UserLog, True, user_id=user_id, action=action
+ )
+
+
+@utils.supported_filters(optional_support_keys=USER_SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_admin_or_owner()
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_user_actions(user_id, user=None, session=None, **filters):
+ """list user actions of a user."""
+ list_user = user_api.get_user(user_id, user=user, session=session)
+ return utils.list_db_objects(
+ session, models.UserLog, order_by=['timestamp'],
+ user_id=list_user['id'], **filters
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@user_api.check_user_admin()
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_actions(user=None, session=None, **filters):
+ """list actions of all users."""
+ return utils.list_db_objects(
+ session, models.UserLog, order_by=['timestamp'], **filters
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_admin_or_owner()
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_user_actions(user_id, user=None, session=None, **filters):
+ """delete actions of a user."""
+ del_user = user_api.get_user(user_id, user=user, session=session)
+ return utils.del_db_objects(
+ session, models.UserLog, user_id=del_user['id'], **filters
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_admin()
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_actions(user=None, session=None, **filters):
+ """delete actions of all users."""
+ return utils.del_db_objects(
+ session, models.UserLog, **filters
+ )
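+
+
+# Usage sketch (the action string is illustrative):
+#
+#     log_user_action(user.id, '/users/login')
+#     list_user_actions(user.id, user=user)
+#     # -> [{'user_id': ..., 'action': '/users/login', 'timestamp': ...}]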
diff --git a/compass-deck/db/api/utils.py b/compass-deck/db/api/utils.py
new file mode 100644
index 0000000..a44f26e
--- /dev/null
+++ b/compass-deck/db/api/utils.py
@@ -0,0 +1,1286 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utils for database usage."""
+
+import functools
+import inspect
+import logging
+import netaddr
+import re
+
+from inspect import isfunction
+from sqlalchemy import and_
+from sqlalchemy import or_
+
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+def model_query(session, model):
+ """model query.
+
+ Return sqlalchemy query object.
+ """
+ if not issubclass(model, models.BASE):
+        raise exception.DatabaseException("model should be subclass of BASE!")
+
+ return session.query(model)
+
+
+def _default_list_condition_func(col_attr, value, condition_func):
+ """The default condition func for a list of data.
+
+    Given the condition func for a single item of data, this function
+    wraps the condition_func and returns another condition func that
+    uses or_ to merge the conditions of each single item, so that a
+    list of data items can be handled.
+
+    Args:
+        col_attr: the column attribute
+        value: the column value to be compared.
+        condition_func: the sqlalchemy condition func, like ==
+
+ Examples:
+ col_attr is name, value is ['a', 'b', 'c'] and
+ condition_func is ==, the returned condition is
+ name == 'a' or name == 'b' or name == 'c'
+ """
+ conditions = []
+ for sub_value in value:
+ condition = condition_func(col_attr, sub_value)
+ if condition is not None:
+ conditions.append(condition)
+ if conditions:
+ return or_(*conditions)
+ else:
+ return None
+
+
+def _one_item_list_condition_func(col_attr, value, condition_func):
+    """The wrapper condition func to deal with a one-item data list.
+
+    For simplicity, it is used to avoid generating overly complex
+    sql conditions.
+ """
+ if value:
+ return condition_func(col_attr, value[0])
+ else:
+ return None
+
+
+def _model_condition_func(
+ col_attr, value,
+ item_condition_func,
+ list_condition_func=_default_list_condition_func
+):
+ """Return sql condition based on value type."""
+ if isinstance(value, list):
+ if not value:
+ return None
+ if len(value) == 1:
+ return item_condition_func(col_attr, value)
+ return list_condition_func(
+ col_attr, value, item_condition_func
+ )
+ else:
+ return item_condition_func(col_attr, value)
+
+
+def _between_condition(col_attr, value):
+ """Return sql range condition."""
+ if value[0] is not None and value[1] is not None:
+ return col_attr.between(value[0], value[1])
+ if value[0] is not None:
+ return col_attr >= value[0]
+ if value[1] is not None:
+ return col_attr <= value[1]
+ return None
+
+
+def model_order_by(query, model, order_by):
+    """Append order_by columns to the sql query."""
+ if not order_by:
+ return query
+ order_by_cols = []
+ for key in order_by:
+ if isinstance(key, tuple):
+ key, is_desc = key
+ else:
+ is_desc = False
+ if isinstance(key, basestring):
+ if hasattr(model, key):
+ col_attr = getattr(model, key)
+ else:
+ continue
+ else:
+ col_attr = key
+ if is_desc:
+ order_by_cols.append(col_attr.desc())
+ else:
+ order_by_cols.append(col_attr)
+ return query.order_by(*order_by_cols)
+
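+# Usage sketch: order_by items are either column names or
+# (column_name, is_desc) tuples, e.g.
+#
+#     query = model_query(session, models.Machine)
+#     query = model_order_by(query, models.Machine, ['mac', ('id', True)])
+#     # orders by mac ascending, then id descending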
+
+def _model_condition(col_attr, value):
+ """Generate condition for one column.
+
+ Example for col_attr is name:
+ value is 'a': name == 'a'
+ value is ['a']: name == 'a'
+ value is ['a', 'b']: name == 'a' or name == 'b'
+ value is {'eq': 'a'}: name == 'a'
+ value is {'lt': 'a'}: name < 'a'
+ value is {'le': 'a'}: name <= 'a'
+ value is {'gt': 'a'}: name > 'a'
+ value is {'ge': 'a'}: name >= 'a'
+ value is {'ne': 'a'}: name != 'a'
+ value is {'in': ['a', 'b']}: name in ['a', 'b']
+ value is {'notin': ['a', 'b']}: name not in ['a', 'b']
+ value is {'startswith': 'abc'}: name like 'abc%'
+ value is {'endswith': 'abc'}: name like '%abc'
+ value is {'like': 'abc'}: name like '%abc%'
+ value is {'between': ('a', 'c')}: name >= 'a' and name <= 'c'
+ value is [{'lt': 'a'}]: name < 'a'
+        value is [{'lt': 'a'}, {'gt': 'c'}]: name < 'a' or name > 'c'
+ value is {'lt': 'c', 'gt': 'a'}: name > 'a' and name < 'c'
+
+ If value is a list, the condition is the or relationship among
+ conditions of each item.
+ If value is dict and there are multi keys in the dict, the relationship
+ is and conditions of each key.
+ Otherwise the condition is to compare the column with the value.
+ """
+ if isinstance(value, list):
+ basetype_values = []
+ composite_values = []
+ for item in value:
+ if isinstance(item, (list, dict)):
+ composite_values.append(item)
+ else:
+ basetype_values.append(item)
+ conditions = []
+ if basetype_values:
+ if len(basetype_values) == 1:
+ condition = (col_attr == basetype_values[0])
+ else:
+ condition = col_attr.in_(basetype_values)
+ conditions.append(condition)
+ for composite_value in composite_values:
+ condition = _model_condition(col_attr, composite_value)
+ if condition is not None:
+ conditions.append(condition)
+ if not conditions:
+ return None
+ if len(conditions) == 1:
+ return conditions[0]
+ return or_(*conditions)
+ elif isinstance(value, dict):
+ conditions = []
+ if 'eq' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['eq'],
+ lambda attr, data: attr == data,
+ lambda attr, data, item_condition_func: attr.in_(data)
+ ))
+ if 'lt' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['lt'],
+ lambda attr, data: attr < data,
+ _one_item_list_condition_func
+ ))
+ if 'gt' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['gt'],
+ lambda attr, data: attr > data,
+ _one_item_list_condition_func
+ ))
+ if 'le' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['le'],
+ lambda attr, data: attr <= data,
+ _one_item_list_condition_func
+ ))
+ if 'ge' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['ge'],
+ lambda attr, data: attr >= data,
+ _one_item_list_condition_func
+ ))
+ if 'ne' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['ne'],
+ lambda attr, data: attr != data,
+ lambda attr, data, item_condition_func: attr.notin_(data)
+ ))
+ if 'in' in value:
+ conditions.append(col_attr.in_(value['in']))
+ if 'notin' in value:
+ conditions.append(col_attr.notin_(value['notin']))
+ if 'startswith' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['startswith'],
+ lambda attr, data: attr.like('%s%%' % data)
+ ))
+ if 'endswith' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['endswith'],
+ lambda attr, data: attr.like('%%%s' % data)
+ ))
+ if 'like' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['like'],
+ lambda attr, data: attr.like('%%%s%%' % data)
+ ))
+ if 'between' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['between'],
+ _between_condition
+ ))
+ conditions = [
+ condition
+ for condition in conditions
+ if condition is not None
+ ]
+ if not conditions:
+ return None
+ if len(conditions) == 1:
+ return conditions[0]
+        return and_(*conditions)
+ else:
+ condition = (col_attr == value)
+ return condition
+
+
+def model_filter(query, model, **filters):
+    """Append conditions to the query for each possible column."""
+ for key, value in filters.items():
+ if isinstance(key, basestring):
+ if hasattr(model, key):
+ col_attr = getattr(model, key)
+ else:
+ continue
+ else:
+ col_attr = key
+ condition = _model_condition(col_attr, value)
+ if condition is not None:
+ query = query.filter(condition)
+ return query
+
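+# Usage sketch of the filter value forms accepted above (values are
+# illustrative):
+#
+#     query = model_query(session, models.Switch)
+#     query = model_filter(
+#         query, models.Switch,
+#         ip={'startswith': '10.145'},
+#         ip_int={'between': (167772160, 184549375)},
+#     )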
+
+def replace_output(**output_mapping):
+    """Decorator to recursively replace output by output mapping.
+
+    The replacement details are described in _replace_output.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ return _replace_output(
+ func(*args, **kwargs), **output_mapping
+ )
+ return wrapper
+ return decorator
+
+
+def _replace_output(data, **output_mapping):
+ """Helper to replace output data.
+
+ Example:
+ data = {'a': 'hello'}
+ output_mapping = {'a': 'b'}
+ returns: {'b': 'hello'}
+
+ data = {'a': {'b': 'hello'}}
+ output_mapping = {'a': 'b'}
+ returns: {'b': {'b': 'hello'}}
+
+ data = {'a': {'b': 'hello'}}
+ output_mapping = {'a': {'b': 'c'}}
+ returns: {'a': {'c': 'hello'}}
+
+ data = [{'a': 'hello'}, {'a': 'hi'}]
+ output_mapping = {'a': 'b'}
+ returns: [{'b': 'hello'}, {'b': 'hi'}]
+ """
+ if isinstance(data, list):
+ return [
+ _replace_output(item, **output_mapping)
+ for item in data
+ ]
+ if not isinstance(data, dict):
+ raise exception.InvalidResponse(
+ '%s type is not dict' % data
+ )
+ info = {}
+ for key, value in data.items():
+ if key in output_mapping:
+ output_key = output_mapping[key]
+ if isinstance(output_key, basestring):
+ info[output_key] = value
+ else:
+ info[key] = (
+ _replace_output(value, **output_key)
+ )
+ else:
+ info[key] = value
+ return info
+
+
+def get_wrapped_func(func):
+ """Get wrapped function instance.
+
+ Example:
+ @dec1
+ @dec2
+ myfunc(*args, **kwargs)
+
+ get_wrapped_func(myfunc) returns function object with
+ following attributes:
+ __name__: 'myfunc'
+ args: args
+ kwargs: kwargs
+ otherwise myfunc is function object with following attributes:
+ __name__: partial object ...
+ args: ...
+ kwargs: ...
+ """
+ if func.func_closure:
+ for closure in func.func_closure:
+ if isfunction(closure.cell_contents):
+ return get_wrapped_func(closure.cell_contents)
+ return func
+ else:
+ return func
+
+
+def wrap_to_dict(support_keys=[], **filters):
+    """Decorator to convert the returned object to dict.
+
+    The details are described in _wrapper_dict.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ return _wrapper_dict(
+ func(*args, **kwargs), support_keys, **filters
+ )
+ return wrapper
+ return decorator
+
+
+def _wrapper_dict(data, support_keys, **filters):
+    """Helper for wrapping a db object into a dictionary.
+
+    If data is a list, convert it to a list of dicts.
+    If data is a Base model, convert it to a dict.
+    Once the data is a dict, filter it with the supported keys.
+    For each (filter_key, filter_value) in filters, also filter
+    data[filter_key] by filter_value recursively if it exists.
+
+ Example:
+ data is models.Switch, it will be converted to
+ {
+ 'id': 1, 'ip': '10.0.0.1', 'ip_int': 123456,
+ 'credentials': {'version': 2, 'password': 'abc'}
+ }
+ Then if support_keys are ['id', 'ip', 'credentials'],
+ it will be filtered to {
+ 'id': 1, 'ip': '10.0.0.1',
+ 'credentials': {'version': 2, 'password': 'abc'}
+ }
+ Then if filters is {'credentials': ['version']},
+ it will be filtered to {
+ 'id': 1, 'ip': '10.0.0.1',
+ 'credentials': {'version': 2}
+ }
+ """
+ logging.debug(
+ 'wrap dict %s by support_keys=%s filters=%s',
+ data, support_keys, filters
+ )
+ if isinstance(data, list):
+ return [
+ _wrapper_dict(item, support_keys, **filters)
+ for item in data
+ ]
+ if isinstance(data, models.HelperMixin):
+ data = data.to_dict()
+ if not isinstance(data, dict):
+ raise exception.InvalidResponse(
+ 'response %s type is not dict' % data
+ )
+ info = {}
+ try:
+ for key in support_keys:
+ if key in data and data[key] is not None:
+ if key in filters:
+ filter_keys = filters[key]
+ if isinstance(filter_keys, dict):
+ info[key] = _wrapper_dict(
+ data[key], filter_keys.keys(),
+ **filter_keys
+ )
+ else:
+ info[key] = _wrapper_dict(
+ data[key], filter_keys
+ )
+ else:
+ info[key] = data[key]
+ return info
+ except Exception as error:
+ logging.exception(error)
+ raise error
+
+
+def replace_filters(**kwarg_mapping):
+ """Decorator to replace kwargs.
+
+ Examples:
+ kwargs: {'a': 'b'}, kwarg_mapping: {'a': 'c'}
+ replaced kwargs to decorated func:
+ {'c': 'b'}
+
+ replace_filters is used to replace caller's input
+ to make it understandable by models.py.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ replaced_kwargs = {}
+ for key, value in kwargs.items():
+ if key in kwarg_mapping:
+ replaced_kwargs[kwarg_mapping[key]] = value
+ else:
+ replaced_kwargs[key] = value
+ return func(*args, **replaced_kwargs)
+ return wrapper
+ return decorator
+
+
+def supported_filters(
+ support_keys=[],
+ optional_support_keys=[],
+ ignore_support_keys=[],
+):
+ """Decorator to check kwargs keys.
+
+    Keys that appear in both kwargs and ignore_support_keys are removed.
+    If any unsupported key is found, an InvalidParameter
+    exception is raised.
+
+ Args:
+ support_keys: keys that must exist.
+ optional_support_keys: keys that may exist.
+ ignore_support_keys: keys should be ignored.
+
+    Assumption: args without a default value are supposed to exist.
+    You may add them to support_keys or not, but we will make sure
+    they appear when the decorated function is called.
+    We do a best match on both args and kwargs to determine whether
+    each key appears.
+
+ Examples:
+ decorated func: func(a, b, c=3, d=4, **kwargs)
+
+ support_keys=['e'] and call func(e=5):
+ raises: InvalidParameter: missing declared arg
+ support_keys=['e'] and call func(1,2,3,4,5,e=6):
+ raises: InvalidParameter: caller sending more args
+ support_keys=['e'] and call func(1,2):
+ raises: InvalidParameter: supported keys ['e'] missing
+ support_keys=['d', 'e'] and call func(1,2,e=3):
+ raises: InvalidParameter: supported keys ['d'] missing
+ support_keys=['d', 'e'] and call func(1,2,d=4, e=3):
+ passed
+ support_keys=['d'], optional_support_keys=['e']
+ and call func(1,2, d=3):
+ passed
+ support_keys=['d'], optional_support_keys=['e']
+ and call func(1,2, d=3, e=4, f=5):
+ raises: InvalidParameter: unsupported keys ['f']
+ support_keys=['d'], optional_support_keys=['e'],
+ ignore_support_keys=['f']
+ and call func(1,2, d=3, e=4, f=5):
+ passed to decorated keys: func(1,2, d=3, e=4)
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **filters):
+ wrapped_func = get_wrapped_func(func)
+ argspec = inspect.getargspec(wrapped_func)
+ wrapped_args = argspec.args
+ args_defaults = argspec.defaults
+ # wrapped_must_args are positional args caller must pass in.
+ if args_defaults:
+ wrapped_must_args = wrapped_args[:-len(args_defaults)]
+ else:
+ wrapped_must_args = wrapped_args[:]
+ # make sure any positional args without default value in
+ # decorated function should appear in args or filters.
+ if len(args) < len(wrapped_must_args):
+ remain_args = wrapped_must_args[len(args):]
+ for remain_arg in remain_args:
+ if remain_arg not in filters:
+ raise exception.InvalidParameter(
+ 'function missing declared arg %s '
+ 'while caller sends args %s' % (
+ remain_arg, args
+ )
+ )
+ # make sure args should be no more than positional args
+ # declared in decorated function.
+ if len(args) > len(wrapped_args):
+ raise exception.InvalidParameter(
+ 'function definition args %s while the caller '
+ 'sends args %s' % (
+ wrapped_args, args
+ )
+ )
+ # exist_args are positional args caller has given.
+ exist_args = dict(zip(wrapped_args, args)).keys()
+ must_support_keys = set(support_keys)
+ all_support_keys = must_support_keys | set(optional_support_keys)
+ wrapped_supported_keys = set(filters) | set(exist_args)
+ unsupported_keys = (
+ set(filters) - set(wrapped_args) -
+ all_support_keys - set(ignore_support_keys)
+ )
+ # unsupported_keys are the keys that are not in support_keys,
+ # optional_support_keys, ignore_support_keys and are not passed in
+ # by positional args. It means the decorated function may
+ # not understand these parameters.
+ if unsupported_keys:
+ raise exception.InvalidParameter(
+ 'filter keys %s are not supported for %s' % (
+ list(unsupported_keys), wrapped_func
+ )
+ )
+ # missing_keys are the keys that must exist but missing in
+ # both positional args or kwargs.
+ missing_keys = must_support_keys - wrapped_supported_keys
+ if missing_keys:
+ raise exception.InvalidParameter(
+ 'filter keys %s not found for %s' % (
+ list(missing_keys), wrapped_func
+ )
+ )
+ # We filter kwargs to eliminate ignore_support_keys in kwargs
+ # passed to decorated function.
+ filtered_filters = dict([
+ (key, value)
+ for key, value in filters.items()
+ if key not in ignore_support_keys
+ ])
+ return func(*args, **filtered_filters)
+ return wrapper
+ return decorator
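+
+# A minimal usage sketch (hypothetical function, for illustration only):
+# 'name' must be given, 'alias' may be given, 'noise' is silently dropped,
+# and any other keyword raises InvalidParameter.
+#
+#     @supported_filters(
+#         support_keys=['name'],
+#         optional_support_keys=['alias'],
+#         ignore_support_keys=['noise']
+#     )
+#     def add_item(name=None, alias=None):
+#         return name, alias
+#
+#     add_item(name='a', noise='x')  # returns ('a', None)
+#     add_item(alias='b')            # raises InvalidParameter: 'name' missing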
+
+
+def input_filters(
+ **filters
+):
+ """Decorator to filter kwargs.
+
+ For each key in kwargs, if the key exists in filters
+ and the call of filters[key] returns False, the key
+ will be removed from kwargs.
+
+ The function definition of filters[key] is
+ func(value, *args, **kwargs) compared with decorated
+ function func(*args, **kwargs)
+
+ The decorator is used to filter kwargs in case some
+ kwargs should be removed conditionally, depending on the
+ related filters.
+
+ Examples:
+ filters={'a': func(value, *args, **kwargs)}
+ @input_filters(**filters)
+ decorated_func(*args, **kwargs)
+ where func returns False.
+ Then calling decorated_func(a=1, b=2)
+ actually calls the decorated func with only b=2;
+ a=1 is removed since it does not pass the filtering.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ filtered_kwargs = {}
+ for key, value in kwargs.items():
+ if key in filters:
+ if filters[key](value, *args, **kwargs):
+ filtered_kwargs[key] = value
+ else:
+ logging.debug(
+ 'ignore filtered input key %s' % key
+ )
+ else:
+ filtered_kwargs[key] = value
+ return func(*args, **filtered_kwargs)
+ return wrapper
+ return decorator
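+
+# A minimal usage sketch (hypothetical function, for illustration only):
+# the 'debug' kwarg is kept only when its value passes the filter.
+#
+#     @input_filters(debug=lambda value, *args, **kwargs: bool(value))
+#     def run(**kwargs):
+#         return kwargs
+#
+#     run(debug=0, level=1)  # returns {'level': 1}; debug=0 is filtered out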
+
+
+def _obj_equal_or_subset(check, obj):
+ """Used by output filter to check if obj is in check."""
+ if check == obj:
+ return True
+ if not issubclass(obj.__class__, check.__class__):
+ return False
+ if isinstance(obj, dict):
+ return _dict_equal_or_subset(check, obj)
+ elif isinstance(obj, list):
+ return _list_equal_or_subset(check, obj)
+ else:
+ return False
+
+
+def _list_equal_or_subset(check_list, obj_list):
+ """Used by output filter to check if obj_list is in check_list"""
+ if not isinstance(check_list, list):
+ return False
+ return set(check_list).issubset(set(obj_list))
+
+
+def _dict_equal_or_subset(check_dict, obj_dict):
+ """Used by output filter to check if obj_dict in check_dict."""
+ if not isinstance(check_dict, dict):
+ return False
+ for key, value in check_dict.items():
+ if (
+ key not in obj_dict or
+ not _obj_equal_or_subset(check_dict[key], obj_dict[key])
+ ):
+ return False
+ return True
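+
+# Illustrative matching behavior of the three helpers above:
+#
+#     _obj_equal_or_subset('a', 'a')                    # True (equal)
+#     _obj_equal_or_subset({'x': 1}, {'x': 1, 'y': 2})  # True (subset)
+#     _obj_equal_or_subset({'x': 2}, {'x': 1, 'y': 2})  # False
+#     _obj_equal_or_subset(['a'], ['a', 'b'])           # True (subset)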
+
+
+def general_filter_callback(general_filter, obj):
+ """General filter function to filter output.
+
+ Since some fields stored in the database are json encoded and
+ we sometimes want to do a deep match on a json encoded field
+ when filtering, we introduce output_filters and
+ general_filter_callback to deal with such cases.
+
+ The key 'resp_eq' gets special treatment: it checks whether
+ general_filter['resp_eq'] equals obj or is a recursive subset of it.
+
+
+ Example:
+ obj: 'b'
+ general_filter: {}
+ returns: True
+
+ obj: 'b'
+ general_filter: {'resp_in': ['a', 'b']}
+ returns: True
+
+ obj: 'b'
+ general_filter: {'resp_in': ['a']}
+ returns: False
+
+ obj: 'b'
+ general_filter: {'resp_eq': 'b'}
+ returns: True
+
+ obj: 'b'
+ general_filter: {'resp_eq': 'a'}
+ returns: False
+
+ obj: 'b'
+ general_filter: {'resp_range': ('a', 'c')}
+ returns: True
+
+ obj: 'd'
+ general_filter: {'resp_range': ('a', 'c')}
+ returns: False
+
+ If there are multiple keys in the dict, the filters are
+ combined with AND semantics.
+
+ If the general_filter is a list, its items are combined
+ with OR semantics.
+
+ Supported general filters: [
+ 'resp_eq', 'resp_in', 'resp_lt',
+ 'resp_le', 'resp_gt', 'resp_ge',
+ 'resp_match', 'resp_range'
+ ]
+ """
+ if isinstance(general_filter, list):
+ if not general_filter:
+ return True
+ return any([
+ general_filter_callback(item, obj)
+ for item in general_filter
+ ])
+ elif isinstance(general_filter, dict):
+ if 'resp_eq' in general_filter:
+ if not _obj_equal_or_subset(
+ general_filter['resp_eq'], obj
+ ):
+ return False
+ if 'resp_in' in general_filter:
+ in_filters = general_filter['resp_in']
+ if not any([
+ _obj_equal_or_subset(in_filter, obj)
+ for in_filter in in_filters
+ ]):
+ return False
+ if 'resp_lt' in general_filter:
+ if obj >= general_filter['resp_lt']:
+ return False
+ if 'resp_le' in general_filter:
+ if obj > general_filter['resp_le']:
+ return False
+ if 'resp_gt' in general_filter:
+ if obj <= general_filter['resp_gt']:
+ return False
+ if 'resp_ge' in general_filter:
+ if obj < general_filter['resp_ge']:
+ return False
+ if 'resp_match' in general_filter:
+ if not re.match(general_filter['resp_match'], obj):
+ return False
+ if 'resp_range' in general_filter:
+ resp_range = general_filter['resp_range']
+ if not isinstance(resp_range, list):
+ resp_range = [resp_range]
+ in_range = False
+ for range_start, range_end in resp_range:
+ if range_start <= obj <= range_end:
+ in_range = True
+ if not in_range:
+ return False
+ return True
+ else:
+ return True
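+
+# Illustrative combined evaluation (assuming the filters shown):
+#
+#     # AND across keys within one dict:
+#     general_filter_callback({'resp_ge': 'a', 'resp_le': 'c'}, 'b')  # True
+#     # OR across items of a list:
+#     general_filter_callback(
+#         [{'resp_eq': 'x'}, {'resp_in': ['a', 'b']}], 'b'
+#     )  # True: the second item matches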
+
+
+def filter_output(filter_callbacks, kwargs, obj, missing_ok=False):
+ """Filter ouput.
+
+ For each key in filter_callbacks, if it exists in kwargs,
+ kwargs[key] tells what we need to filter. If the call of
+ filter_callbacks[key] returns False, it tells the obj should be
+ filtered out of output.
+ """
+ for callback_key, callback_value in filter_callbacks.items():
+ if callback_key not in kwargs:
+ continue
+ if callback_key not in obj:
+ if missing_ok:
+ continue
+ else:
+ raise exception.InvalidResponse(
+ '%s is not in %s' % (callback_key, obj)
+ )
+ if not callback_value(
+ kwargs[callback_key], obj[callback_key]
+ ):
+ return False
+ return True
+
+
+def output_filters(missing_ok=False, **filter_callbacks):
+ """Decorator to filter output list.
+
+ Each filter_callback should have a definition like:
+ func({'resp_eq': 'a'}, 'a')
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ filtered_obj_list = []
+ obj_list = func(*args, **kwargs)
+ for obj in obj_list:
+ if filter_output(
+ filter_callbacks, kwargs, obj, missing_ok
+ ):
+ filtered_obj_list.append(obj)
+ return filtered_obj_list
+ return wrapper
+ return decorator
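+
+# A minimal usage sketch (hypothetical function, for illustration only):
+# each returned dict is kept only if its 'state' value passes the general
+# filter the caller supplies in kwargs.
+#
+#     @output_filters(state=general_filter_callback)
+#     def list_states(**kwargs):
+#         return [{'state': 'INITIALIZED'}, {'state': 'ERROR'}]
+#
+#     list_states(state={'resp_in': ['ERROR']})
+#     # returns [{'state': 'ERROR'}]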
+
+
+def _input_validates(args_validators, kwargs_validators, *args, **kwargs):
+ """Used by input_validators to validate inputs."""
+ for i, value in enumerate(args):
+ if i < len(args_validators) and args_validators[i]:
+ args_validators[i](value)
+ for key, value in kwargs.items():
+ if kwargs_validators.get(key):
+ kwargs_validators[key](value)
+
+
+def input_validates(*args_validators, **kwargs_validators):
+ """Decorator to validate input.
+
+ Each validator should have a definition like:
+ func('00:01:02:03:04:05')
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ _input_validates(
+ args_validators, kwargs_validators,
+ *args, **kwargs
+ )
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
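+
+# A minimal usage sketch (hypothetical function, for illustration only),
+# reusing check_mac defined later in this module:
+#
+#     @input_validates(mac=check_mac)
+#     def add_machine(mac=None):
+#         return mac
+#
+#     add_machine(mac='00:01:02:03:04:05')  # passes validation
+#     add_machine(mac='not-a-mac')          # raises InvalidParameter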
+
+
+def _input_validates_with_args(
+ args_validators, kwargs_validators, *args, **kwargs
+):
+ """Validate input with validators.
+
+ Each validator takes the arguments of the decorated function
+ as its arguments. The function definition is like:
+ func(value, *args, **kwargs) compared with the decorated
+ function func(*args, **kwargs).
+ """
+ for i, value in enumerate(args):
+ if i < len(args_validators) and args_validators[i]:
+ args_validators[i](value, *args, **kwargs)
+ for key, value in kwargs.items():
+ if kwargs_validators.get(key):
+ kwargs_validators[key](value, *args, **kwargs)
+
+
+def input_validates_with_args(
+ *args_validators, **kwargs_validators
+):
+ """Decorator to validate input."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ _input_validates_with_args(
+ args_validators, kwargs_validators,
+ *args, **kwargs
+ )
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
+
+
+def _output_validates_with_args(
+ kwargs_validators, obj, *args, **kwargs
+):
+ """Validate output with validators.
+
+ Each validator takes the arguments of the decorated function
+ as its arguments. The function definition is like:
+ func(value, *args, **kwargs) compared with the decorated
+ function func(*args, **kwargs).
+ """
+ if isinstance(obj, list):
+ for item in obj:
+ _output_validates_with_args(
+ kwargs_validators, item, *args, **kwargs
+ )
+ return
+ if isinstance(obj, models.HelperMixin):
+ obj = obj.to_dict()
+ if not isinstance(obj, dict):
+ raise exception.InvalidResponse(
+ 'response %s type is not dict' % str(obj)
+ )
+ try:
+ for key, value in obj.items():
+ if key in kwargs_validators:
+ kwargs_validators[key](value, *args, **kwargs)
+ except Exception as error:
+ logging.exception(error)
+ raise error
+
+
+def output_validates_with_args(**kwargs_validators):
+ """Decorator to validate output.
+
+ The validator can take the arguments of the decorated
+ function as its arguments.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ obj = func(*args, **kwargs)
+ if isinstance(obj, list):
+ for obj_item in obj:
+ _output_validates_with_args(
+ kwargs_validators, obj_item,
+ *args, **kwargs
+ )
+ else:
+ _output_validates_with_args(
+ kwargs_validators, obj,
+ *args, **kwargs
+ )
+ return obj
+ return wrapper
+ return decorator
+
+
+def _output_validates(kwargs_validators, obj):
+ """Validate output.
+
+ Each validator has the following signature:
+ func(value)
+ """
+ if isinstance(obj, list):
+ for item in obj:
+ _output_validates(kwargs_validators, item)
+ return
+ if isinstance(obj, models.HelperMixin):
+ obj = obj.to_dict()
+ if not isinstance(obj, dict):
+ raise exception.InvalidResponse(
+ 'response %s type is not dict' % str(obj)
+ )
+ try:
+ for key, value in obj.items():
+ if key in kwargs_validators:
+ kwargs_validators[key](value)
+ except Exception as error:
+ logging.exception(error)
+ raise error
+
+
+def output_validates(**kwargs_validators):
+ """Decorator to validate output."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ obj = func(*args, **kwargs)
+ if isinstance(obj, list):
+ for obj_item in obj:
+ _output_validates(kwargs_validators, obj_item)
+ else:
+ _output_validates(kwargs_validators, obj)
+ return obj
+ return wrapper
+ return decorator
+
+
+def get_db_object(session, table, exception_when_missing=True, **kwargs):
+ """Get db object.
+
+ If exception_when_missing is False and the db object cannot
+ be found, None is returned instead of raising an exception.
+ """
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s get db object %s from table %s',
+ id(session), kwargs, table.__name__)
+ db_object = model_filter(
+ model_query(session, table), table, **kwargs
+ ).first()
+ logging.debug(
+ 'session %s got db object %s', id(session), db_object
+ )
+ if db_object:
+ return db_object
+
+ if not exception_when_missing:
+ return None
+
+ raise exception.RecordNotExists(
+ 'Cannot find the record in table %s: %s' % (
+ table.__name__, kwargs
+ )
+ )
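+
+# A minimal usage sketch (illustrative; the model and filter are
+# hypothetical):
+#
+#     machine = get_db_object(
+#         session, models.Machine, exception_when_missing=False, id=1
+#     )
+#     if machine is None:
+#         pass  # no matching record and no exception raised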
+
+
+def add_db_object(session, table, exception_when_existing=True,
+ *args, **kwargs):
+ """Create db object.
+
+ If exception_when_existing is False and the db object already
+ exists, the existing db object is updated instead of raising
+ an exception.
+ """
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s add object %s attributes %s to table %s',
+ id(session), args, kwargs, table.__name__)
+ argspec = inspect.getargspec(table.__init__)
+ arg_names = argspec.args[1:]
+ arg_defaults = argspec.defaults
+ if not arg_defaults:
+ arg_defaults = []
+ if not (
+ len(arg_names) - len(arg_defaults) <= len(args) <= len(arg_names)
+ ):
+ raise exception.InvalidParameter(
+ 'arg names %s do not match arg values %s' % (
+ arg_names, args)
+ )
+ db_keys = dict(zip(arg_names, args))
+ if db_keys:
+ db_object = session.query(table).filter_by(**db_keys).first()
+ else:
+ db_object = None
+
+ new_object = False
+ if db_object:
+ logging.debug(
+ 'got db object %s: %s', db_keys, db_object
+ )
+ if exception_when_existing:
+ raise exception.DuplicatedRecord(
+ '%s exists in table %s' % (db_keys, table.__name__)
+ )
+ else:
+ db_object = table(**db_keys)
+ new_object = True
+
+ for key, value in kwargs.items():
+ setattr(db_object, key, value)
+
+ if new_object:
+ session.add(db_object)
+ session.flush()
+ db_object.initialize()
+ db_object.validate()
+ logging.debug(
+ 'session %s db object %s added', id(session), db_object
+ )
+ return db_object
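+
+# A minimal usage sketch (illustrative; the model, columns and machine_id
+# are hypothetical): positional args map to the model's __init__ arguments
+# and identify the record; kwargs set additional attributes.
+#
+#     host = add_db_object(
+#         session, models.Host, False, machine_id,
+#         name='host1'
+#     )
+#     # With exception_when_existing=False, an existing record with the
+#     # same keys is updated instead of raising DuplicatedRecord.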
+
+
+def list_db_objects(session, table, order_by=[], **filters):
+ """List db objects.
+
+ If order_by is given, the db objects are sorted by the given keys.
+ """
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s list db objects by filters %s in table %s',
+ id(session), filters, table.__name__
+ )
+ db_objects = model_order_by(
+ model_filter(
+ model_query(session, table),
+ table,
+ **filters
+ ),
+ table,
+ order_by
+ ).all()
+ logging.debug(
+ 'session %s got listed db objects: %s',
+ id(session), db_objects
+ )
+ return db_objects
+
+
+def del_db_objects(session, table, **filters):
+ """delete db objects."""
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s delete db objects by filters %s in table %s',
+ id(session), filters, table.__name__
+ )
+ query = model_filter(
+ model_query(session, table), table, **filters
+ )
+ db_objects = query.all()
+ query.delete(synchronize_session=False)
+ logging.debug(
+ 'session %s db objects %s deleted', id(session), db_objects
+ )
+ return db_objects
+
+
+def update_db_objects(session, table, updates={}, **filters):
+ """Update db objects."""
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s update db objects by filters %s in table %s',
+ id(session), filters, table.__name__)
+ db_objects = model_filter(
+ model_query(session, table), table, **filters
+ ).all()
+ for db_object in db_objects:
+ logging.debug('update db object %s: %s', db_object, updates)
+ update_db_object(session, db_object, **updates)
+ logging.debug(
+ 'session %s db objects %s updated',
+ id(session), db_objects
+ )
+ return db_objects
+
+
+def update_db_object(session, db_object, **kwargs):
+ """Update db object."""
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s update db object %s by value %s',
+ id(session), db_object, kwargs
+ )
+ for key, value in kwargs.items():
+ setattr(db_object, key, value)
+ session.flush()
+ db_object.update()
+ db_object.validate()
+ logging.debug(
+ 'session %s db object %s updated',
+ id(session), db_object
+ )
+ return db_object
+
+
+def del_db_object(session, db_object):
+ """Delete db object."""
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s delete db object %s',
+ id(session), db_object
+ )
+ session.delete(db_object)
+ logging.debug(
+ 'session %s db object %s deleted',
+ id(session), db_object
+ )
+ return db_object
+
+
+def check_ip(ip):
+ """Check ip is ip address formatted."""
+ try:
+ netaddr.IPAddress(ip)
+ except Exception as error:
+ logging.exception(error)
+ raise exception.InvalidParameter(
+ 'ip address %s format incorrect' % ip
+ )
+
+
+def check_mac(mac):
+ """Check mac is mac address formatted."""
+ try:
+ netaddr.EUI(mac)
+ except Exception as error:
+ logging.exception(error)
+ raise exception.InvalidParameter(
+ 'invalid mac address %s' % mac
+ )
+
+
+NAME_PATTERN = re.compile(r'[a-zA-Z0-9][a-zA-Z0-9_-]*')
+
+
+def check_name(name):
+ """Check name meeting name format requirement."""
+ if not NAME_PATTERN.match(name):
+ raise exception.InvalidParameter(
+ 'name %s does not match the pattern %s' % (
+ name, NAME_PATTERN.pattern
+ )
+ )
+
+
+def _check_ipmi_credentials_ip(ip):
+ check_ip(ip)
+
+
+def check_ipmi_credentials(ipmi_credentials):
+ """Check ipmi credentials format is correct."""
+ if not ipmi_credentials:
+ return
+ if not isinstance(ipmi_credentials, dict):
+ raise exception.InvalidParameter(
+ 'invalid ipmi credentials %s' % ipmi_credentials
+ )
+ for key in ipmi_credentials:
+ if key not in ['ip', 'username', 'password']:
+ raise exception.InvalidParameter(
+ 'unrecognized field %s in ipmi credentials %s' % (
+ key, ipmi_credentials
+ )
+ )
+ for key in ['ip', 'username', 'password']:
+ if key not in ipmi_credentials:
+ raise exception.InvalidParameter(
+ 'no field %s in ipmi credentials %s' % (
+ key, ipmi_credentials
+ )
+ )
+ check_ipmi_credential_field = '_check_ipmi_credentials_%s' % key
+ this_module = globals()
+ if check_ipmi_credential_field in this_module:
+ this_module[check_ipmi_credential_field](
+ ipmi_credentials[key]
+ )
+ else:
+ logging.debug(
+ 'function %s is not defined', check_ipmi_credential_field
+ )
+
+
+def _check_switch_credentials_version(version):
+ if version not in ['1', '2c', '3']:
+ raise exception.InvalidParameter(
+ 'unknown snmp version %s' % version
+ )
+
+
+def check_switch_credentials(credentials):
+ """Check switch credentials format is correct."""
+ if not credentials:
+ return
+ if not isinstance(credentials, dict):
+ raise exception.InvalidParameter(
+ 'credentials %s is not dict' % credentials
+ )
+ for key in credentials:
+ if key not in ['version', 'community']:
+ raise exception.InvalidParameter(
+ 'unrecognized key %s in credentials %s' % (key, credentials)
+ )
+ for key in ['version', 'community']:
+ if key not in credentials:
+ raise exception.InvalidParameter(
+ 'there is no %s field in credentials %s' % (key, credentials)
+ )
+
+ key_check_func_name = '_check_switch_credentials_%s' % key
+ this_module = globals()
+ if key_check_func_name in this_module:
+ this_module[key_check_func_name](
+ credentials[key]
+ )
+ else:
+ logging.debug(
+ 'function %s is not defined',
+ key_check_func_name
+ )
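+
+# Illustrative inputs for the check above:
+#
+#     check_switch_credentials({'version': '2c', 'community': 'public'})
+#     # passes: both fields present and the snmp version is recognized
+#     check_switch_credentials({'version': '4', 'community': 'public'})
+#     # raises InvalidParameter: unknown snmp version 4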
diff --git a/compass-deck/db/callback.py b/compass-deck/db/callback.py
new file mode 100644
index 0000000..35798bc
--- /dev/null
+++ b/compass-deck/db/callback.py
@@ -0,0 +1,204 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metadata Callback methods."""
+import logging
+import netaddr
+import random
+import re
+import socket
+
+from compass.db import exception
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+CALLBACK_GLOBALS = globals()
+CALLBACK_LOCALS = locals()
+CALLBACK_CONFIGS = util.load_configs(
+ setting.CALLBACK_DIR,
+ config_name_suffix='.py',
+ env_globals=CALLBACK_GLOBALS,
+ env_locals=CALLBACK_LOCALS
+)
+for callback_config in CALLBACK_CONFIGS:
+ CALLBACK_LOCALS.update(callback_config)
+
+
+def default_proxy(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_PROXY
+
+
+def proxy_options(name, **kwargs):
+ return [setting.COMPASS_SUPPORTED_PROXY]
+
+
+def default_noproxy(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
+
+
+def noproxy_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
+
+
+def default_ntp_server(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_NTP_SERVER
+
+
+def ntp_server_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_NTP_SERVER
+
+
+def default_dns_servers(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DNS_SERVERS
+
+
+def dns_servers_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DNS_SERVERS
+
+
+def default_domain(name, **kwargs):
+ if setting.COMPASS_SUPPORTED_DOMAINS:
+ return setting.COMPASS_SUPPORTED_DOMAINS[0]
+ else:
+ return None
+
+
+def domain_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DOMAINS
+
+
+def default_search_path(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DOMAINS
+
+
+def search_path_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DOMAINS
+
+
+def default_gateway(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY
+
+
+def default_gateway_options(name, **kwargs):
+ return [setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY]
+
+
+def default_localrepo(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_LOCAL_REPO
+
+
+def default_localrepo_options(name, **kwargs):
+ return [setting.COMPASS_SUPPORTED_LOCAL_REPO]
+
+
+def autofill_callback_default(name, config, **kwargs):
+ if config is None:
+ if (
+ 'autofill_types' not in kwargs or
+ not (set(kwargs['autofill_types']) & set(kwargs))
+ ):
+ return None
+ if 'default_value' not in kwargs:
+ return None
+ return kwargs['default_value']
+ return config
+
+
+def autofill_callback_random_option(name, config, **kwargs):
+ if config is None:
+ if (
+ 'autofill_types' not in kwargs or
+ not (set(kwargs['autofill_types']) & set(kwargs))
+ ):
+ return None
+ if 'options' not in kwargs or not kwargs['options']:
+ return None
+ return random.choice(kwargs['options'])
+ return config
+
+
+def autofill_no_proxy(name, config, **kwargs):
+ logging.debug(
+ 'autofill %s config %s by params %s',
+ name, config, kwargs
+ )
+ if 'cluster' in kwargs:
+ if config is None:
+ config = []
+ if 'default_value' in kwargs:
+ for default_no_proxy in kwargs['default_value']:
+ if default_no_proxy and default_no_proxy not in config:
+ config.append(default_no_proxy)
+ cluster = kwargs['cluster']
+ for clusterhost in cluster.clusterhosts:
+ host = clusterhost.host
+ hostname = host.name
+ if hostname not in config:
+ config.append(hostname)
+ for host_network in host.host_networks:
+ if host_network.is_mgmt:
+ ip = host_network.ip
+ if ip not in config:
+ config.append(ip)
+ if not config:
+ return config
+ return [no_proxy for no_proxy in config if no_proxy]
+
+
+def autofill_network_mapping(name, config, **kwargs):
+ logging.debug(
+ 'autofill %s config %s by params %s',
+ name, config, kwargs
+ )
+ if not config:
+ return config
+ if isinstance(config, basestring):
+ config = {
+ 'interface': config,
+ 'subnet': None
+ }
+ if not isinstance(config, dict):
+ return config
+ if 'interface' not in config:
+ return config
+ subnet = None
+ interface = config['interface']
+ if 'cluster' in kwargs:
+ cluster = kwargs['cluster']
+ for clusterhost in cluster.clusterhosts:
+ host = clusterhost.host
+ for host_network in host.host_networks:
+ if host_network.interface == interface:
+ subnet = host_network.subnet.subnet
+ elif 'clusterhost' in kwargs:
+ clusterhost = kwargs['clusterhost']
+ host = clusterhost.host
+ for host_network in host.host_networks:
+ if host_network.interface == interface:
+ subnet = host_network.subnet.subnet
+ if not subnet:
+ raise exception.InvalidParameter(
+ 'interface %s not found in host(s)' % interface
+ )
+ if 'subnet' not in config or not config['subnet']:
+ config['subnet'] = subnet
+ else:
+ if config['subnet'] != subnet:
+ raise exception.InvalidParameter(
+ 'subnet %s in config is not equal to subnet %s in hosts' % (
+ config['subnet'], subnet
+ )
+ )
+ return config
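+
+# Illustrative config shape for the callback above (interface name and
+# subnet are hypothetical): a bare interface name is expanded to a dict
+# and the subnet is looked up from the matching host network.
+#
+#     autofill_network_mapping(
+#         'network_mapping.management', 'eth0', cluster=cluster
+#     )
+#     # may return {'interface': 'eth0', 'subnet': '10.1.0.0/24'}
+#     # when a host in the cluster has eth0 on 10.1.0.0/24.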
diff --git a/compass-deck/db/config_validation/__init__.py b/compass-deck/db/config_validation/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/db/config_validation/__init__.py
diff --git a/compass-deck/db/config_validation/default_validator.py b/compass-deck/db/config_validation/default_validator.py
new file mode 100644
index 0000000..224447f
--- /dev/null
+++ b/compass-deck/db/config_validation/default_validator.py
@@ -0,0 +1,131 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Default config validation function."""
+
+from sqlalchemy import or_
+
+from compass.db.models import OSConfigField
+from compass.db.models import OSConfigMetadata
+from compass.db import validator
+
+MAPPER = {
+ "os_id": {
+ "metaTable": OSConfigMetadata,
+ "metaFieldTable": OSConfigField
+ }
+ # "adapter_id": {
+ # "metaTable": AdapterConfigMetadata,
+ # "metaFieldTable": AdapterConfigField
+ # }
+}
+
+
+def validate_config(session, config, id_name, id_value, patch=True):
+ """Validates config.
+
+ Validates the given config value against the config
+ metadata of the associated os_id or adapter_id. Returns
+ a tuple (status, message).
+ """
+ if id_name not in MAPPER.keys():
+ return (False, "Invalid id type %s" % id_name)
+
+ meta_table = MAPPER[id_name]['metaTable']
+ metafield_table = MAPPER[id_name]['metaFieldTable']
+ with session.begin(subtransactions=True):
+ name_col = getattr(meta_table, 'name')
+ id_col = getattr(meta_table, id_name)
+
+ return _validate_config_helper(session, config,
+ name_col, id_col, id_value,
+ meta_table, metafield_table,
+ patch)
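+
+# A minimal usage sketch (the config content is illustrative):
+#
+#     is_valid, message = validate_config(
+#         session,
+#         {'server_credentials': {'username': 'root', 'password': 'x'}},
+#         'os_id', 1
+#     )
+#     if not is_valid:
+#         pass  # message describes the offending metadata or field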
+
+
+def _validate_config_helper(session, config,
+ name_col, id_col, id_value,
+ meta_table, metafield_table, patch=True):
+
+ with session.begin(subtransactions=True):
+ for elem in config:
+
+ obj = session.query(meta_table).filter(name_col == elem)\
+ .filter(or_(id_col.is_(None),
+ id_col == id_value)).first()
+
+ if not obj and "_type" not in config[elem]:
+ return (False, "Invalid metadata '%s'!" % elem)
+
+ if "_type" in config[elem]:
+ # Metadata is a variable
+ metadata_name = config[elem]['_type']
+ obj = session.query(meta_table).filter_by(name=metadata_name)\
+ .first()
+
+ if not obj:
+ err_msg = ("Invalid metatdata '%s' or missing '_type'"
+ "to indicate this is a variable metatdata."
+ % elem)
+ return (False, err_msg)
+
+ # TODO(Grace): validate metadata here
+ del config[elem]['_type']
+
+ fields = obj.fields
+
+ if not fields:
+ is_valid, message = _validate_config_helper(session,
+ config[elem],
+ name_col, id_col,
+ id_value,
+ meta_table,
+ metafield_table,
+ patch)
+ if not is_valid:
+ return (False, message)
+
+ else:
+ field_config = config[elem]
+ for key in field_config:
+ field = session.query(metafield_table)\
+ .filter_by(field=key).first()
+ if not field:
+ # The field is not in schema
+ return (False, "Invalid field '%s'!" % key)
+
+ value = field_config[key]
+ if field.is_required and value is None:
+ # The value of this field is required
+ # and cannot be none
+ err = "The value of field '%s' cannot be null!" % key
+ return (False, err)
+
+ if field.validator:
+ func = getattr(validator, field.validator)
+ if not func or not func(value):
+ err_msg = ("The value of the field '%s' is "
+ "invalid format or None!" % key)
+ return (False, err_msg)
+
+ # This is a PUT request. We need to check presence of all
+ # required fields.
+ if not patch:
+ for field in fields:
+ name = field.field
+ if field.is_required and name not in field_config:
+ return (False,
+ "Missing required field '%s'" % name)
+
+ return (True, None)
diff --git a/compass-deck/db/config_validation/extension/__init__.py b/compass-deck/db/config_validation/extension/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/db/config_validation/extension/__init__.py
diff --git a/compass-deck/db/config_validation/extension/openstack.py b/compass-deck/db/config_validation/extension/openstack.py
new file mode 100644
index 0000000..6b3af69
--- /dev/null
+++ b/compass-deck/db/config_validation/extension/openstack.py
@@ -0,0 +1,18 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def validate_cluster_config():
+ # TODO(xiaodong): Add openstack specific validation here.
+ pass
diff --git a/compass-deck/db/exception.py b/compass-deck/db/exception.py
new file mode 100644
index 0000000..44556c9
--- /dev/null
+++ b/compass-deck/db/exception.py
@@ -0,0 +1,116 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Custom exception"""
+import traceback
+
+
+class DatabaseException(Exception):
+ """Base class for all database exceptions."""
+ def __init__(self, message):
+ super(DatabaseException, self).__init__(message)
+ self.traceback = traceback.format_exc()
+ self.status_code = 400
+
+ def to_dict(self):
+ return {'message': str(self)}
+
+
+class RecordNotExists(DatabaseException):
+ """Define the exception for referring non-existing object in DB."""
+ def __init__(self, message):
+ super(RecordNotExists, self).__init__(message)
+ self.status_code = 404
+
+
+class DuplicatedRecord(DatabaseException):
+ """Define the exception for trying to insert an existing object in DB."""
+ def __init__(self, message):
+ super(DuplicatedRecord, self).__init__(message)
+ self.status_code = 409
+
+
+class Unauthorized(DatabaseException):
+ """Define the exception for invalid user login."""
+ def __init__(self, message):
+ super(Unauthorized, self).__init__(message)
+ self.status_code = 401
+
+
+class UserDisabled(DatabaseException):
+ """Define the exception that a disabled user tries to do some operations.
+
+ """
+ def __init__(self, message):
+ super(UserDisabled, self).__init__(message)
+ self.status_code = 403
+
+
+class Forbidden(DatabaseException):
+ """Define the exception that a user is trying to make some action
+
+ without the right permission.
+
+ """
+ def __init__(self, message):
+ super(Forbidden, self).__init__(message)
+ self.status_code = 403
+
+
+class NotAcceptable(DatabaseException):
+ """The data is not acceptable."""
+ def __init__(self, message):
+ super(NotAcceptable, self).__init__(message)
+ self.status_code = 406
+
+
+class InvalidParameter(DatabaseException):
+ """Define the exception that the request has invalid or missing parameters.
+
+ """
+ def __init__(self, message):
+ super(InvalidParameter, self).__init__(message)
+ self.status_code = 400
+
+
+class InvalidResponse(DatabaseException):
+ """Define the exception that the response is invalid.
+
+ """
+ def __init__(self, message):
+ super(InvalidResponse, self).__init__(message)
+ self.status_code = 400
+
+
+class MultiDatabaseException(DatabaseException):
+ """Define the exception composites with multi exceptions."""
+ def __init__(self, exceptions):
+ super(MultiDatabaseException, self).__init__('multi exceptions')
+ self.exceptions = exceptions
+ self.status_code = 400
+
+ @property
+ def traceback(self):
+ tracebacks = []
+ for exception in self.exceptions:
+ tracebacks.append(exception.traceback)
+ return tracebacks
+
+ def to_dict(self):
+ dict_info = super(MultiDatabaseException, self).to_dict()
+ dict_info.update({
+ 'exceptions': [
+ exception.to_dict() for exception in self.exceptions
+ ]
+ })
+ return dict_info
diff --git a/compass-deck/db/models.py b/compass-deck/db/models.py
new file mode 100644
index 0000000..d4b0324
--- /dev/null
+++ b/compass-deck/db/models.py
@@ -0,0 +1,1924 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Database model"""
+import copy
+import datetime
+import logging
+import netaddr
+import re
+import simplejson as json
+
+from sqlalchemy import BigInteger
+from sqlalchemy import Boolean
+from sqlalchemy import Column
+from sqlalchemy import ColumnDefault
+from sqlalchemy import DateTime
+from sqlalchemy import Enum
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.ext.hybrid import hybrid_property
+from sqlalchemy import Float
+from sqlalchemy import ForeignKey
+from sqlalchemy import Integer
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import Text
+from sqlalchemy.types import TypeDecorator
+from sqlalchemy import UniqueConstraint
+
+from compass.db import exception
+from compass.utils import util
+
+
+BASE = declarative_base()
+
+
+class JSONEncoded(TypeDecorator):
+ """Represents an immutable structure as a json-encoded string."""
+
+ impl = Text
+
+ def process_bind_param(self, value, dialect):
+ if value is not None:
+ value = json.dumps(value)
+ return value
+
+ def process_result_value(self, value, dialect):
+ if value is not None:
+ value = json.loads(value)
+ return value
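+
+# A minimal usage sketch (illustrative column definition): dicts or lists
+# assigned to a JSONEncoded column are stored as json text and decoded
+# back into Python objects on load.
+#
+#     config = Column(JSONEncoded, default={})
+#     obj.config = {'dns': ['8.8.8.8']}  # persisted as '{"dns": ["8.8.8.8"]}'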
+
+
+class TimestampMixin(object):
+ """Provides table fields for each row created/updated timestamp."""
+ created_at = Column(DateTime, default=lambda: datetime.datetime.now())
+ updated_at = Column(DateTime, default=lambda: datetime.datetime.now(),
+ onupdate=lambda: datetime.datetime.now())
+
+
+class HelperMixin(object):
+ """Provides general fuctions for all compass table models."""
+
+ def initialize(self):
+ self.update()
+
+ def update(self):
+ pass
+
+ @staticmethod
+ def type_compatible(value, column_type):
+ """Check if value type is compatible with the column type."""
+ if value is None:
+ return True
+ if not hasattr(column_type, 'python_type'):
+ return True
+ column_python_type = column_type.python_type
+ if isinstance(value, column_python_type):
+ return True
+ if issubclass(column_python_type, basestring):
+ return isinstance(value, basestring)
+ if column_python_type in [int, long]:
+ return type(value) in [int, long]
+ if column_python_type in [float]:
+ return type(value) in [float]
+ if column_python_type in [bool]:
+ return type(value) in [bool]
+ return False
+
+ def validate(self):
+ """Generate validate function to make sure the record is legal."""
+ columns = self.__mapper__.columns
+ for key, column in columns.items():
+ value = getattr(self, key)
+ if not self.type_compatible(value, column.type):
+ raise exception.InvalidParameter(
+ 'column %s value %r type is unexpected: %s' % (
+ key, value, column.type
+ )
+ )
+
+ def to_dict(self):
+ """General function to convert record to dict.
+
+ Convert all columns not starting with '_' to
+ {<column_name>: <column_value>}
+ """
+ keys = self.__mapper__.columns.keys()
+ dict_info = {}
+ for key in keys:
+ if key.startswith('_'):
+ continue
+ value = getattr(self, key)
+ if value is not None:
+ if isinstance(value, datetime.datetime):
+ value = util.format_datetime(value)
+ dict_info[key] = value
+ return dict_info
+
+
+class StateMixin(TimestampMixin, HelperMixin):
+ """Provides general fields and functions for state related table."""
+
+ state = Column(
+ Enum(
+ 'UNINITIALIZED', 'INITIALIZED', 'UPDATE_PREPARING',
+ 'INSTALLING', 'SUCCESSFUL', 'ERROR'
+ ),
+ ColumnDefault('UNINITIALIZED')
+ )
+ percentage = Column(Float, default=0.0)
+ message = Column(Text, default='')
+ severity = Column(
+ Enum('INFO', 'WARNING', 'ERROR'),
+ ColumnDefault('INFO')
+ )
+ ready = Column(Boolean, default=False)
+
+ def update(self):
+ # In the state table, some field information is redundant.
+ # The update function makes sure all related fields
+ # are set to a consistent state.
+ if self.ready:
+ self.state = 'SUCCESSFUL'
+ if self.state in ['UNINITIALIZED', 'INITIALIZED']:
+ self.percentage = 0.0
+ self.severity = 'INFO'
+ self.message = ''
+ if self.state == 'INSTALLING':
+ if self.severity == 'ERROR':
+ self.state = 'ERROR'
+ elif self.percentage >= 1.0:
+ self.state = 'SUCCESSFUL'
+ self.percentage = 1.0
+ if self.state == 'SUCCESSFUL':
+ self.percentage = 1.0
+ super(StateMixin, self).update()
+
+
+class LogHistoryMixin(TimestampMixin, HelperMixin):
+ """Provides general fields and functions for LogHistory related tables."""
+ position = Column(Integer, default=0)
+ partial_line = Column(Text, default='')
+ percentage = Column(Float, default=0.0)
+ message = Column(Text, default='')
+ severity = Column(
+ Enum('ERROR', 'WARNING', 'INFO'),
+ ColumnDefault('INFO')
+ )
+ line_matcher_name = Column(
+ String(80), default='start'
+ )
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ if not self.filename:
+ raise exception.InvalidParameter(
+ 'filename is not set in %s' % self.id
+ )
+
+
+class HostNetwork(BASE, TimestampMixin, HelperMixin):
+ """Host network table."""
+ __tablename__ = 'host_network'
+
+ id = Column(Integer, primary_key=True)
+ host_id = Column(
+ Integer,
+ ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ interface = Column(
+ String(80), nullable=False)
+ subnet_id = Column(
+ Integer,
+ ForeignKey('subnet.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ user_id = Column(Integer, ForeignKey('user.id'))
+ ip_int = Column(BigInteger, nullable=False)
+ is_mgmt = Column(Boolean, default=False)
+ is_promiscuous = Column(Boolean, default=False)
+
+ __table_args__ = (
+ UniqueConstraint('host_id', 'interface', name='interface_constraint'),
+ UniqueConstraint('ip_int', 'user_id', name='ip_constraint')
+ )
+
+ def __init__(self, host_id, interface, user_id, **kwargs):
+ self.host_id = host_id
+ self.interface = interface
+ self.user_id = user_id
+ super(HostNetwork, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'HostNetwork[%s=%s]' % (self.interface, self.ip)
+
+ @property
+ def ip(self):
+ return str(netaddr.IPAddress(self.ip_int))
+
+ @ip.setter
+ def ip(self, value):
+ self.ip_int = int(netaddr.IPAddress(value))
+
+ @property
+ def netmask(self):
+ return str(netaddr.IPNetwork(self.subnet.subnet).netmask)
+
+ def update(self):
+ self.host.config_validated = False
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(HostNetwork, self).validate()
+ if not self.subnet:
+ raise exception.InvalidParameter(
+ 'subnet is not set in %s interface %s' % (
+ self.host_id, self.interface
+ )
+ )
+ if not self.ip_int:
+ raise exception.InvalidParameter(
+ 'ip is not set in %s interface %s' % (
+ self.host_id, self.interface
+ )
+ )
+ ip = netaddr.IPAddress(self.ip_int)
+ subnet = netaddr.IPNetwork(self.subnet.subnet)
+ if ip not in subnet:
+ raise exception.InvalidParameter(
+ 'ip %s is not in subnet %s' % (
+ str(ip), str(subnet)
+ )
+ )
+
+ def to_dict(self):
+ dict_info = super(HostNetwork, self).to_dict()
+ dict_info['ip'] = self.ip
+ dict_info['interface'] = self.interface
+ dict_info['netmask'] = self.netmask
+ dict_info['subnet'] = self.subnet.subnet
+ dict_info['user_id'] = self.user_id
+ return dict_info
+
+
+class ClusterHostLogHistory(BASE, LogHistoryMixin):
+ """clusterhost installing log history for each file.
+
+ """
+ __tablename__ = 'clusterhost_log_history'
+
+ clusterhost_id = Column(
+ 'id', Integer,
+ ForeignKey('clusterhost.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+ filename = Column(String(80), primary_key=True, nullable=False)
+ cluster_id = Column(
+ Integer,
+ ForeignKey('cluster.id')
+ )
+ host_id = Column(
+ Integer,
+ ForeignKey('host.id')
+ )
+
+ def __init__(self, clusterhost_id, filename, **kwargs):
+ self.clusterhost_id = clusterhost_id
+ self.filename = filename
+ super(ClusterHostLogHistory, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'ClusterHostLogHistory[%s:%s]' % (
+ self.clusterhost_id, self.filename
+ )
+
+ def initialize(self):
+ self.cluster_id = self.clusterhost.cluster_id
+ self.host_id = self.clusterhost.host_id
+ super(ClusterHostLogHistory, self).initialize()
+
+
+class HostLogHistory(BASE, LogHistoryMixin):
+ """host installing log history for each file.
+
+ """
+ __tablename__ = 'host_log_history'
+
+ id = Column(
+ Integer,
+ ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True)
+ filename = Column(String(80), primary_key=True, nullable=False)
+
+ def __init__(self, id, filename, **kwargs):
+ self.id = id
+ self.filename = filename
+ super(HostLogHistory, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'HostLogHistory[%s:%s]' % (self.id, self.filename)
+
+
+class ClusterHostState(BASE, StateMixin):
+ """ClusterHost state table."""
+ __tablename__ = 'clusterhost_state'
+
+ id = Column(
+ Integer,
+ ForeignKey(
+ 'clusterhost.id',
+ onupdate='CASCADE', ondelete='CASCADE'
+ ),
+ primary_key=True
+ )
+
+ def __str__(self):
+ return 'ClusterHostState[%s state %s percentage %s]' % (
+ self.id, self.state, self.percentage
+ )
+
+ def update(self):
+ """Update clusterhost state.
+
+ When clusterhost state is updated, the underlying host state
+ may be updated accordingly.
+ """
+ super(ClusterHostState, self).update()
+ host_state = self.clusterhost.host.state
+ if self.state == 'INITIALIZED':
+ if host_state.state in ['UNINITIALIZED', 'UPDATE_PREPARING']:
+ host_state.state = 'INITIALIZED'
+ host_state.update()
+ elif self.state == 'INSTALLING':
+ if host_state.state in [
+ 'UNINITIALIZED', 'UPDATE_PREPARING', 'INITIALIZED'
+ ]:
+ host_state.state = 'INSTALLING'
+ host_state.update()
+ elif self.state == 'SUCCESSFUL':
+ if host_state.state != 'SUCCESSFUL':
+ host_state.state = 'SUCCESSFUL'
+ host_state.update()
+
+
+class ClusterHost(BASE, TimestampMixin, HelperMixin):
+ """ClusterHost table."""
+ __tablename__ = 'clusterhost'
+
+ clusterhost_id = Column('id', Integer, primary_key=True)
+ cluster_id = Column(
+ Integer,
+ ForeignKey('cluster.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ host_id = Column(
+ Integer,
+ ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ # the list of role names.
+ _roles = Column('roles', JSONEncoded, default=[])
+ _patched_roles = Column('patched_roles', JSONEncoded, default=[])
+ config_step = Column(String(80), default='')
+ package_config = Column(JSONEncoded, default={})
+ config_validated = Column(Boolean, default=False)
+ deployed_package_config = Column(JSONEncoded, default={})
+
+ log_histories = relationship(
+ ClusterHostLogHistory,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('clusterhost')
+ )
+
+ __table_args__ = (
+ UniqueConstraint('cluster_id', 'host_id', name='constraint'),
+ )
+
+ state = relationship(
+ ClusterHostState,
+ uselist=False,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('clusterhost')
+ )
+
+ def __init__(self, cluster_id, host_id, **kwargs):
+ self.cluster_id = cluster_id
+ self.host_id = host_id
+ self.state = ClusterHostState()
+ super(ClusterHost, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'ClusterHost[%s:%s]' % (self.clusterhost_id, self.name)
+
+ def update(self):
+ if self.host.reinstall_os:
+ if self.state.state in ['SUCCESSFUL', 'ERROR']:
+ if self.config_validated:
+ self.state.state = 'INITIALIZED'
+ else:
+ self.state.state = 'UNINITIALIZED'
+ self.cluster.update()
+ self.host.update()
+ self.state.update()
+ super(ClusterHost, self).update()
+
+ @property
+ def name(self):
+ return '%s.%s' % (self.host.name, self.cluster.name)
+
+ @property
+ def patched_package_config(self):
+ return self.package_config
+
+ @patched_package_config.setter
+ def patched_package_config(self, value):
+ package_config = copy.deepcopy(self.package_config)
+ self.package_config = util.merge_dict(package_config, value)
+ logging.debug(
+ 'patch clusterhost %s package_config: %s',
+ self.clusterhost_id, value
+ )
+ self.config_validated = False
+
+ @property
+ def put_package_config(self):
+ return self.package_config
+
+ @put_package_config.setter
+ def put_package_config(self, value):
+ package_config = copy.deepcopy(self.package_config)
+ package_config.update(value)
+ self.package_config = package_config
+ logging.debug(
+ 'put clusterhost %s package_config: %s',
+ self.clusterhost_id, value
+ )
+ self.config_validated = False
+
+ @property
+ def patched_os_config(self):
+ return self.host.os_config
+
+ @patched_os_config.setter
+ def patched_os_config(self, value):
+ host = self.host
+ host.patched_os_config = value
+
+ @property
+ def put_os_config(self):
+ return self.host.os_config
+
+ @put_os_config.setter
+ def put_os_config(self, value):
+ host = self.host
+ host.put_os_config = value
+
+ @property
+ def deployed_os_config(self):
+ return self.host.deployed_os_config
+
+ @deployed_os_config.setter
+ def deployed_os_config(self, value):
+ host = self.host
+ host.deployed_os_config = value
+
+ @hybrid_property
+ def os_name(self):
+ return self.host.os_name
+
+ @os_name.expression
+ def os_name(cls):
+ return cls.host.os_name
+
+ @hybrid_property
+ def clustername(self):
+ return self.cluster.name
+
+ @clustername.expression
+ def clustername(cls):
+ return cls.cluster.name
+
+ @hybrid_property
+ def hostname(self):
+ return self.host.hostname
+
+ @hostname.expression
+ def hostname(cls):
+ return Host.hostname
+
+ @property
+ def distributed_system_installed(self):
+ return self.state.state == 'SUCCESSFUL'
+
+ @property
+ def reinstall_os(self):
+ return self.host.reinstall_os
+
+ @property
+ def reinstall_distributed_system(self):
+ return self.cluster.reinstall_distributed_system
+
+ @property
+ def os_installed(self):
+ return self.host.os_installed
+
+ @property
+ def roles(self):
+ # Only roles that exist in the flavor roles are returned,
+ # sorted in the order defined in the flavor roles.
+ # Duplicate role names are removed.
+ # The returned value is a list of dicts like
+ # [{'name': 'allinone', 'optional': False}]
+ role_names = list(self._roles)
+ if not role_names:
+ return []
+ cluster_roles = self.cluster.flavor['roles']
+ if not cluster_roles:
+ return []
+ roles = []
+ for cluster_role in cluster_roles:
+ if cluster_role['name'] in role_names:
+ roles.append(cluster_role)
+ return roles
+
+ @roles.setter
+ def roles(self, value):
+ """value should be a list of role name."""
+ self._roles = list(value)
+ self.config_validated = False
+
+ @property
+ def patched_roles(self):
+ patched_role_names = list(self._patched_roles)
+ if not patched_role_names:
+ return []
+ cluster_roles = self.cluster.flavor['roles']
+ if not cluster_roles:
+ return []
+ roles = []
+ for cluster_role in cluster_roles:
+ if cluster_role['name'] in patched_role_names:
+ roles.append(cluster_role)
+ return roles
+
+ @patched_roles.setter
+ def patched_roles(self, value):
+ """value should be a list of role name."""
+ # if value is an empty list, we empty the field
+ if value:
+ roles = list(self._roles)
+ roles.extend(value)
+ self._roles = roles
+ patched_roles = list(self._patched_roles)
+ patched_roles.extend(value)
+ self._patched_roles = patched_roles
+ self.config_validated = False
+ else:
+ self._patched_roles = list(value)
+ self.config_validated = False
+
+ @hybrid_property
+ def owner(self):
+ return self.cluster.owner
+
+ @owner.expression
+ def owner(cls):
+ return cls.cluster.owner
+
+ def state_dict(self):
+ """Get clusterhost state dict.
+
+ The clusterhost state_dict is different from
+ clusterhost.state.to_dict. state_dict shows the progress of
+ both installing the os on the host and installing the
+ distributed system on the clusterhost, while
+ clusterhost.state.to_dict only shows the progress of installing
+ the distributed system on the clusterhost.
+ """
+ cluster = self.cluster
+ host = self.host
+ host_state = host.state_dict()
+ if not cluster.flavor_name:
+ return host_state
+ clusterhost_state = self.state.to_dict()
+ if clusterhost_state['state'] in ['ERROR', 'SUCCESSFUL']:
+ return clusterhost_state
+ if (
+ clusterhost_state['state'] == 'INSTALLING' and
+ clusterhost_state['percentage'] > 0
+ ):
+ clusterhost_state['percentage'] = min(
+ 1.0, (
+ 0.5 + clusterhost_state['percentage'] / 2
+ )
+ )
+ return clusterhost_state
+
+ host_state['percentage'] = host_state['percentage'] / 2
+ if host_state['state'] == 'SUCCESSFUL':
+ host_state['state'] = 'INSTALLING'
+ return host_state
+
+ def to_dict(self):
+ dict_info = self.host.to_dict()
+ dict_info.update(super(ClusterHost, self).to_dict())
+ state_dict = self.state_dict()
+ dict_info.update({
+ 'distributed_system_installed': self.distributed_system_installed,
+ 'reinstall_distributed_system': self.reinstall_distributed_system,
+ 'owner': self.owner,
+ 'clustername': self.clustername,
+ 'name': self.name,
+ 'state': state_dict['state']
+ })
+ dict_info['roles'] = self.roles
+ dict_info['patched_roles'] = self.patched_roles
+ return dict_info
+
+
+class HostState(BASE, StateMixin):
+ """Host state table."""
+ __tablename__ = 'host_state'
+
+ id = Column(
+ Integer,
+ ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+
+ def __str__(self):
+ return 'HostState[%s state %s percentage %s]' % (
+ self.id, self.state, self.percentage
+ )
+
+ def update(self):
+ """Update host state.
+
+ When host state is updated, all clusterhosts on the
+ host will update their state if necessary.
+ """
+ super(HostState, self).update()
+ host = self.host
+ if self.state == 'INSTALLING':
+ host.reinstall_os = False
+ for clusterhost in self.host.clusterhosts:
+ if clusterhost.state.state in [
+ 'SUCCESSFUL', 'ERROR'
+ ]:
+ clusterhost.state.state = 'INSTALLING'
+ clusterhost.state.update()
+ elif self.state == 'UNINITIALIZED':
+ for clusterhost in self.host.clusterhosts:
+ if clusterhost.state.state in [
+ 'INITIALIZED', 'INSTALLING', 'SUCCESSFUL', 'ERROR'
+ ]:
+ clusterhost.state.state = 'UNINITIALIZED'
+ clusterhost.state.update()
+ elif self.state == 'UPDATE_PREPARING':
+ for clusterhost in self.host.clusterhosts:
+ if clusterhost.state.state in [
+ 'INITIALIZED', 'INSTALLING', 'SUCCESSFUL', 'ERROR'
+ ]:
+ clusterhost.state.state = 'UPDATE_PREPARING'
+ clusterhost.state.update()
+ elif self.state == 'INITIALIZED':
+ for clusterhost in self.host.clusterhosts:
+ if clusterhost.state.state in [
+ 'INSTALLING', 'SUCCESSFUL', 'ERROR'
+ ]:
+ clusterhost.state.state = 'INITIALIZED'
+ clusterhost.state.update()
+
+
+class Host(BASE, TimestampMixin, HelperMixin):
+ """Host table."""
+ __tablename__ = 'host'
+
+ name = Column(String(80), nullable=True)
+ config_step = Column(String(80), default='')
+ os_config = Column(JSONEncoded, default={})
+ config_validated = Column(Boolean, default=False)
+ deployed_os_config = Column(JSONEncoded, default={})
+ os_name = Column(String(80))
+ creator_id = Column(Integer, ForeignKey('user.id'))
+ owner = Column(String(80))
+ os_installer = Column(JSONEncoded, default={})
+
+ __table_args__ = (
+ UniqueConstraint('name', 'owner', name='constraint'),
+ )
+
+ id = Column(
+ Integer,
+ ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+ reinstall_os = Column(Boolean, default=True)
+
+ host_networks = relationship(
+ HostNetwork,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('host')
+ )
+ clusterhosts = relationship(
+ ClusterHost,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('host')
+ )
+ state = relationship(
+ HostState,
+ uselist=False,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('host')
+ )
+ log_histories = relationship(
+ HostLogHistory,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('host')
+ )
+
+ def __str__(self):
+ return 'Host[%s:%s]' % (self.id, self.name)
+
+ @hybrid_property
+ def mac(self):
+ machine = self.machine
+ if machine:
+ return machine.mac
+ else:
+ return None
+
+ @property
+ def os_id(self):
+ return self.os_name
+
+ @os_id.setter
+ def os_id(self, value):
+ self.os_name = value
+
+ @hybrid_property
+ def hostname(self):
+ return self.name
+
+ @hostname.expression
+ def hostname(cls):
+ return cls.name
+
+ @property
+ def patched_os_config(self):
+ return self.os_config
+
+ @patched_os_config.setter
+ def patched_os_config(self, value):
+ os_config = copy.deepcopy(self.os_config)
+ self.os_config = util.merge_dict(os_config, value)
+ logging.debug('patch host os config in %s: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def put_os_config(self):
+ return self.os_config
+
+ @put_os_config.setter
+ def put_os_config(self, value):
+ os_config = copy.deepcopy(self.os_config)
+ os_config.update(value)
+ self.os_config = os_config
+ logging.debug('put host os config in %s: %s', self.id, value)
+ self.config_validated = False
+
+ def __init__(self, id, **kwargs):
+ self.id = id
+ self.state = HostState()
+ super(Host, self).__init__(**kwargs)
+
+ def update(self):
+ creator = self.creator
+ if creator:
+ self.owner = creator.email
+ if self.reinstall_os:
+ if self.state.state in ['SUCCESSFUL', 'ERROR']:
+ if self.config_validated:
+ self.state.state = 'INITIALIZED'
+ else:
+ self.state.state = 'UNINITIALIZED'
+ self.state.update()
+ self.state.update()
+ super(Host, self).update()
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to the column in future.
+ super(Host, self).validate()
+ creator = self.creator
+ if not creator:
+ raise exception.InvalidParameter(
+ 'creator is not set in host %s' % self.id
+ )
+ os_name = self.os_name
+ if not os_name:
+ raise exception.InvalidParameter(
+ 'os is not set in host %s' % self.id
+ )
+ os_installer = self.os_installer
+ if not os_installer:
+ raise exception.InvalidParameter(
+ 'os_installer is not set in host %s' % self.id
+ )
+
+ @property
+ def os_installed(self):
+ return self.state.state == 'SUCCESSFUL'
+
+ @property
+ def clusters(self):
+ return [clusterhost.cluster for clusterhost in self.clusterhosts]
+
+ def state_dict(self):
+ return self.state.to_dict()
+
+ def to_dict(self):
+ """Host dict contains its underlying machine dict."""
+ dict_info = self.machine.to_dict()
+ dict_info.update(super(Host, self).to_dict())
+ state_dict = self.state_dict()
+ ip = None
+ for host_network in self.host_networks:
+ if host_network.is_mgmt:
+ ip = host_network.ip
+ dict_info.update({
+ 'machine_id': self.machine.id,
+ 'os_installed': self.os_installed,
+ 'hostname': self.hostname,
+ 'ip': ip,
+ 'networks': [
+ host_network.to_dict()
+ for host_network in self.host_networks
+ ],
+ 'os_id': self.os_id,
+ 'clusters': [cluster.to_dict() for cluster in self.clusters],
+ 'state': state_dict['state']
+ })
+ return dict_info
+
+
+class ClusterState(BASE, StateMixin):
+ """Cluster state table."""
+ __tablename__ = 'cluster_state'
+
+ id = Column(
+ Integer,
+ ForeignKey('cluster.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+ total_hosts = Column(
+ Integer,
+ default=0
+ )
+ installing_hosts = Column(
+ Integer,
+ default=0
+ )
+ completed_hosts = Column(
+ Integer,
+ default=0
+ )
+ failed_hosts = Column(
+ Integer,
+ default=0
+ )
+
+ def __init__(self, **kwargs):
+ super(ClusterState, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'ClusterState[%s state %s percentage %s]' % (
+ self.id, self.state, self.percentage
+ )
+
+ def to_dict(self):
+ dict_info = super(ClusterState, self).to_dict()
+ dict_info['status'] = {
+ 'total_hosts': self.total_hosts,
+ 'installing_hosts': self.installing_hosts,
+ 'completed_hosts': self.completed_hosts,
+ 'failed_hosts': self.failed_hosts
+ }
+ return dict_info
+
+ def update(self):
+ # All fields of the cluster state are aggregated from the
+ # states of its underlying clusterhosts.
+ cluster = self.cluster
+ clusterhosts = cluster.clusterhosts
+ self.total_hosts = len(clusterhosts)
+ self.installing_hosts = 0
+ self.failed_hosts = 0
+ self.completed_hosts = 0
+ if not cluster.flavor_name:
+ for clusterhost in clusterhosts:
+ host = clusterhost.host
+ host_state = host.state.state
+ if host_state == 'INSTALLING':
+ self.installing_hosts += 1
+ elif host_state == 'ERROR':
+ self.failed_hosts += 1
+ elif host_state == 'SUCCESSFUL':
+ self.completed_hosts += 1
+ else:
+ for clusterhost in clusterhosts:
+ clusterhost_state = clusterhost.state.state
+ if clusterhost_state == 'INSTALLING':
+ self.installing_hosts += 1
+ elif clusterhost_state == 'ERROR':
+ self.failed_hosts += 1
+ elif clusterhost_state == 'SUCCESSFUL':
+ self.completed_hosts += 1
+ if self.total_hosts:
+ if self.completed_hosts == self.total_hosts:
+ self.percentage = 1.0
+ else:
+ self.percentage = (
+ float(self.completed_hosts)
+ /
+ float(self.total_hosts)
+ )
+ if self.state == 'SUCCESSFUL':
+ self.state = 'INSTALLING'
+ self.ready = False
+ self.message = (
+ 'total %s, installing %s, completed %s, error %s'
+ ) % (
+ self.total_hosts, self.installing_hosts,
+ self.completed_hosts, self.failed_hosts
+ )
+ if self.failed_hosts:
+ self.severity = 'ERROR'
+
+ super(ClusterState, self).update()
+ if self.state == 'INSTALLING':
+ cluster.reinstall_distributed_system = False
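+
+ # Editor's illustrative note (not in the original source): with
+ # total_hosts=4, completed_hosts=2, installing_hosts=1 and
+ # failed_hosts=1, update() sets percentage to 2.0 / 4 = 0.5, message
+ # to 'total 4, installing 1, completed 2, error 1' and, since
+ # failed_hosts is non-zero, severity to 'ERROR'.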
+
+
+class Cluster(BASE, TimestampMixin, HelperMixin):
+ """Cluster table."""
+ __tablename__ = 'cluster'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), nullable=False)
+ reinstall_distributed_system = Column(Boolean, default=True)
+ config_step = Column(String(80), default='')
+ os_name = Column(String(80))
+ flavor_name = Column(String(80), nullable=True)
+ # flavor dict got from flavor id.
+ flavor = Column(JSONEncoded, default={})
+ os_config = Column(JSONEncoded, default={})
+ package_config = Column(JSONEncoded, default={})
+ deployed_os_config = Column(JSONEncoded, default={})
+ deployed_package_config = Column(JSONEncoded, default={})
+ config_validated = Column(Boolean, default=False)
+ adapter_name = Column(String(80))
+ creator_id = Column(Integer, ForeignKey('user.id'))
+ owner = Column(String(80))
+ clusterhosts = relationship(
+ ClusterHost,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('cluster')
+ )
+ state = relationship(
+ ClusterState,
+ uselist=False,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('cluster')
+ )
+ __table_args__ = (
+ UniqueConstraint('name', 'creator_id', name='constraint'),
+ )
+
+ def __init__(self, name, creator_id, **kwargs):
+ self.name = name
+ self.creator_id = creator_id
+ self.state = ClusterState()
+ super(Cluster, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'Cluster[%s:%s]' % (self.id, self.name)
+
+ def update(self):
+ creator = self.creator
+ if creator:
+ self.owner = creator.email
+ if self.reinstall_distributed_system:
+ if self.state.state in ['SUCCESSFUL', 'ERROR']:
+ if self.config_validated:
+ self.state.state = 'INITIALIZED'
+ else:
+ self.state.state = 'UNINITIALIZED'
+ self.state.update()
+ self.state.update()
+ super(Cluster, self).update()
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(Cluster, self).validate()
+ creator = self.creator
+ if not creator:
+ raise exception.InvalidParameter(
+ 'creator is not set in cluster %s' % self.id
+ )
+ os_name = self.os_name
+ if not os_name:
+ raise exception.InvalidParameter(
+ 'os is not set in cluster %s' % self.id
+ )
+ adapter_name = self.adapter_name
+ if not adapter_name:
+ raise exception.InvalidParameter(
+ 'adapter is not set in cluster %s' % self.id
+ )
+ flavor_name = self.flavor_name
+ if flavor_name:
+ if 'name' not in self.flavor:
+ raise exception.InvalidParameter(
+ 'key name does not exist in flavor %s' % (
+ self.flavor
+ )
+ )
+ if flavor_name != self.flavor['name']:
+ raise exception.InvalidParameter(
+ 'flavor name %s does not match '
+ 'the name key in flavor %s' % (
+ flavor_name, self.flavor
+ )
+ )
+ else:
+ if self.flavor:
+ raise exception.InvalidParameter(
+ 'flavor %s should be empty when '
+ 'flavor_name is not set' % self.flavor
+ )
+
+ @property
+ def os_id(self):
+ return self.os_name
+
+ @os_id.setter
+ def os_id(self, value):
+ self.os_name = value
+
+ @property
+ def adapter_id(self):
+ return self.adapter_name
+
+ @adapter_id.setter
+ def adapter_id(self, value):
+ self.adapter_name = value
+
+ @property
+ def flavor_id(self):
+ if self.flavor_name:
+ return '%s:%s' % (self.adapter_name, self.flavor_name)
+ else:
+ return None
+
+ @flavor_id.setter
+ def flavor_id(self, value):
+ if value:
+ _, flavor_name = value.split(':', 1)
+ self.flavor_name = flavor_name
+ else:
+ self.flavor_name = value
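+
+ # Editor's note: flavor_id is '<adapter_name>:<flavor_name>', e.g. a
+ # hypothetical 'openstack:HA' reads back adapter_name 'openstack' and
+ # flavor_name 'HA'; the setter splits on the first ':' and keeps only
+ # the flavor part.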
+
+ @property
+ def patched_os_config(self):
+ return self.os_config
+
+ @patched_os_config.setter
+ def patched_os_config(self, value):
+ os_config = copy.deepcopy(self.os_config)
+ self.os_config = util.merge_dict(os_config, value)
+ logging.debug('patch cluster %s os config: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def put_os_config(self):
+ return self.os_config
+
+ @put_os_config.setter
+ def put_os_config(self, value):
+ os_config = copy.deepcopy(self.os_config)
+ os_config.update(value)
+ self.os_config = os_config
+ logging.debug('put cluster %s os config: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def patched_package_config(self):
+ return self.package_config
+
+ @patched_package_config.setter
+ def patched_package_config(self, value):
+ package_config = copy.deepcopy(self.package_config)
+ self.package_config = util.merge_dict(package_config, value)
+ logging.debug('patch cluster %s package config: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def put_package_config(self):
+ return self.package_config
+
+ @put_package_config.setter
+ def put_package_config(self, value):
+ package_config = dict(self.package_config)
+ package_config.update(value)
+ self.package_config = package_config
+ logging.debug('put cluster %s package config: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def distributed_system_installed(self):
+ return self.state.state == 'SUCCESSFUL'
+
+ def state_dict(self):
+ return self.state.to_dict()
+
+ def to_dict(self):
+ dict_info = super(Cluster, self).to_dict()
+ dict_info['distributed_system_installed'] = (
+ self.distributed_system_installed
+ )
+ dict_info['os_id'] = self.os_id
+ dict_info['adapter_id'] = self.adapter_id
+ dict_info['flavor_id'] = self.flavor_id
+ return dict_info
+
+
+# User, Permission relation table
+class UserPermission(BASE, HelperMixin, TimestampMixin):
+ """User permission table."""
+ __tablename__ = 'user_permission'
+ id = Column(Integer, primary_key=True)
+ user_id = Column(
+ Integer,
+ ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ permission_id = Column(
+ Integer,
+ ForeignKey('permission.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ __table_args__ = (
+ UniqueConstraint('user_id', 'permission_id', name='constraint'),
+ )
+
+ def __init__(self, user_id, permission_id, **kwargs):
+ self.user_id = user_id
+ self.permission_id = permission_id
+ super(UserPermission, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'UserPermission[%s:%s]' % (self.id, self.name)
+
+ @hybrid_property
+ def name(self):
+ return self.permission.name
+
+ def to_dict(self):
+ dict_info = self.permission.to_dict()
+ dict_info.update(super(UserPermission, self).to_dict())
+ return dict_info
+
+
+class Permission(BASE, HelperMixin, TimestampMixin):
+ """Permission table."""
+ __tablename__ = 'permission'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True, nullable=False)
+ alias = Column(String(100))
+ description = Column(Text)
+ user_permissions = relationship(
+ UserPermission,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('permission')
+ )
+
+ def __init__(self, name, **kwargs):
+ self.name = name
+ super(Permission, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'Permission[%s:%s]' % (self.id, self.name)
+
+
+class UserToken(BASE, HelperMixin):
+ """user token table."""
+ __tablename__ = 'user_token'
+
+ id = Column(Integer, primary_key=True)
+ user_id = Column(
+ Integer,
+ ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ token = Column(String(256), unique=True, nullable=False)
+ expire_timestamp = Column(DateTime, nullable=True)
+
+ def __init__(self, token, **kwargs):
+ self.token = token
+ super(UserToken, self).__init__(**kwargs)
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(UserToken, self).validate()
+ if not self.user:
+ raise exception.InvalidParameter(
+ 'user is not set in token: %s' % self.token
+ )
+
+
+class UserLog(BASE, HelperMixin):
+ """User log table."""
+ __tablename__ = 'user_log'
+
+ id = Column(Integer, primary_key=True)
+ user_id = Column(
+ Integer,
+ ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ action = Column(Text)
+ timestamp = Column(DateTime, default=lambda: datetime.datetime.now())
+
+ @hybrid_property
+ def user_email(self):
+ return self.user.email
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(UserLog, self).validate()
+ if not self.user:
+ raise exception.InvalidParameter(
+ 'user is not set in user log: %s' % self.id
+ )
+
+
+class User(BASE, HelperMixin, TimestampMixin):
+ """User table."""
+ __tablename__ = 'user'
+
+ id = Column(Integer, primary_key=True)
+ email = Column(String(80), unique=True, nullable=False)
+ crypted_password = Column('password', String(225))
+ firstname = Column(String(80))
+ lastname = Column(String(80))
+ is_admin = Column(Boolean, default=False)
+ active = Column(Boolean, default=True)
+ user_permissions = relationship(
+ UserPermission,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('user')
+ )
+ user_logs = relationship(
+ UserLog,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('user')
+ )
+ user_tokens = relationship(
+ UserToken,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('user')
+ )
+ clusters = relationship(
+ Cluster,
+ backref=backref('creator')
+ )
+ hosts = relationship(
+ Host,
+ backref=backref('creator')
+ )
+
+ def __init__(self, email, **kwargs):
+ self.email = email
+ super(User, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'User[%s]' % self.email
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(User, self).validate()
+ if not self.crypted_password:
+ raise exception.InvalidParameter(
+ 'password is not set in user : %s' % self.email
+ )
+
+ @property
+ def password(self):
+ return '***********'
+
+ @password.setter
+ def password(self, password):
+ # password stored in database is crypted.
+ self.crypted_password = util.encrypt(password)
+
+ @hybrid_property
+ def permissions(self):
+ permissions = []
+ for user_permission in self.user_permissions:
+ permissions.append(user_permission.permission)
+
+ return permissions
+
+ def to_dict(self):
+ dict_info = super(User, self).to_dict()
+ dict_info['permissions'] = [
+ permission.to_dict()
+ for permission in self.permissions
+ ]
+ return dict_info
+
+
+class SwitchMachine(BASE, HelperMixin, TimestampMixin):
+ """Switch Machine table."""
+ __tablename__ = 'switch_machine'
+ switch_machine_id = Column(
+ 'id', Integer, primary_key=True
+ )
+ switch_id = Column(
+ Integer,
+ ForeignKey('switch.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ machine_id = Column(
+ Integer,
+ ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ owner_id = Column(Integer, ForeignKey('user.id'))
+ port = Column(String(80), nullable=True)
+ vlans = Column(JSONEncoded, default=[])
+ __table_args__ = (
+ UniqueConstraint('switch_id', 'machine_id', name='constraint'),
+ )
+
+ def __init__(self, switch_id, machine_id, **kwargs):
+ self.switch_id = switch_id
+ self.machine_id = machine_id
+ super(SwitchMachine, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'SwitchMachine[%s port %s]' % (
+ self.switch_machine_id, self.port
+ )
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(SwitchMachine, self).validate()
+ if not self.switch:
+ raise exception.InvalidParameter(
+ 'switch is not set in %s' % self.id
+ )
+ if not self.machine:
+ raise exception.InvalidParameter(
+ 'machine is not set in %s' % self.id
+ )
+ if not self.port:
+ raise exception.InvalidParameter(
+ 'port is not set in %s' % self.id
+ )
+
+ @hybrid_property
+ def mac(self):
+ return self.machine.mac
+
+ @hybrid_property
+ def tag(self):
+ return self.machine.tag
+
+ @property
+ def switch_ip(self):
+ return self.switch.ip
+
+ @hybrid_property
+ def switch_ip_int(self):
+ return self.switch.ip_int
+
+ @switch_ip_int.expression
+ def switch_ip_int(cls):
+ return Switch.ip_int
+
+ @hybrid_property
+ def switch_vendor(self):
+ return self.switch.vendor
+
+ @switch_vendor.expression
+ def switch_vendor(cls):
+ return Switch.vendor
+
+ @property
+ def patched_vlans(self):
+ return self.vlans
+
+ @patched_vlans.setter
+ def patched_vlans(self, value):
+ if not value:
+ return
+ vlans = list(self.vlans)
+ for item in value:
+ if item not in vlans:
+ vlans.append(item)
+ self.vlans = vlans
+
+ @property
+ def filtered(self):
+ """Check if switch machine should be filtered.
+
+ port should be composed with <port_prefix><port_number><port_suffix>
+ For each filter in switch machine filters,
+ if filter_type is allow and port match the pattern, the switch
+ machine is allowed to be got by api. If filter_type is deny and
+ port match the pattern, the switch machine is not allowed to be got
+ by api.
+ If not filter is matched, if the last filter is allow, deny all
+ unmatched switch machines, if the last filter is deny, allow all
+ unmatched switch machines.
+ If no filter defined, allow all switch machines.
+ if ports defined in filter and 'all' in ports, the switch machine is
+ matched. if ports defined in filter and 'all' not in ports,
+ the switch machine with the port name in ports will be matched.
+ If the port pattern matches
+ <<port_prefix><port_number><port_suffix> and port number is in the
+ range of [port_start, port_end], the switch machine is matched.
+ """
+ filters = self.switch.machine_filters
+ port = self.port
+ unmatched_allowed = True
+ ports_pattern = re.compile(r'(\D*)(\d+)-(\d+)(\D*)')
+ port_pattern = re.compile(r'(\D*)(\d+)(\D*)')
+ port_match = port_pattern.match(port)
+ if port_match:
+ port_prefix = port_match.group(1)
+ port_number = int(port_match.group(2))
+ port_suffix = port_match.group(3)
+ else:
+ port_prefix = ''
+ port_number = 0
+ port_suffix = ''
+ for port_filter in filters:
+ filter_type = port_filter.get('filter_type', 'allow')
+ denied = filter_type != 'allow'
+ unmatched_allowed = denied
+ if 'ports' in port_filter:
+ if 'all' in port_filter['ports']:
+ return denied
+ if port in port_filter['ports']:
+ return denied
+ if port_match:
+ for port_or_ports in port_filter['ports']:
+ ports_match = ports_pattern.match(port_or_ports)
+ if ports_match:
+ filter_port_prefix = ports_match.group(1)
+ filter_port_start = int(ports_match.group(2))
+ filter_port_end = int(ports_match.group(3))
+ filter_port_suffix = ports_match.group(4)
+ if (
+ filter_port_prefix == port_prefix and
+ filter_port_suffix == port_suffix and
+ filter_port_start <= port_number and
+ port_number <= filter_port_end
+ ):
+ return denied
+ else:
+ filter_port_prefix = port_filter.get('port_prefix', '')
+ filter_port_suffix = port_filter.get('port_suffix', '')
+ if (
+ port_match and
+ port_prefix == filter_port_prefix and
+ port_suffix == filter_port_suffix
+ ):
+ if (
+ 'port_start' not in port_filter or
+ port_number >= port_filter['port_start']
+ ) and (
+ 'port_end' not in port_filter or
+ port_number <= port_filter['port_end']
+ ):
+ return denied
+ return not unmatched_allowed
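+
+ # Worked example (editor's sketch; the port names are hypothetical):
+ # with machine_filters parsed from 'deny ports ae10;allow ports all',
+ # a switch machine on port 'ae10' matches the deny filter, so
+ # filtered returns True and the machine is hidden from the api; any
+ # other port matches 'allow ports all', so filtered returns False.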
+
+ def to_dict(self):
+ dict_info = self.machine.to_dict()
+ dict_info.update(super(SwitchMachine, self).to_dict())
+ dict_info['switch_ip'] = self.switch.ip
+ return dict_info
+
+
+class Machine(BASE, HelperMixin, TimestampMixin):
+ """Machine table."""
+ __tablename__ = 'machine'
+ id = Column(Integer, primary_key=True)
+ mac = Column(String(24), unique=True, nullable=False)
+ ipmi_credentials = Column(JSONEncoded, default={})
+ tag = Column(JSONEncoded, default={})
+ location = Column(JSONEncoded, default={})
+ owner_id = Column(Integer, ForeignKey('user.id'))
+ machine_attributes = Column(JSONEncoded, default={})
+
+ switch_machines = relationship(
+ SwitchMachine,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('machine')
+ )
+ host = relationship(
+ Host,
+ uselist=False,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('machine')
+ )
+
+ def __init__(self, mac, **kwargs):
+ self.mac = mac
+ super(Machine, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'Machine[%s:%s]' % (self.id, self.mac)
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(Machine, self).validate()
+ try:
+ netaddr.EUI(self.mac)
+ except Exception:
+ raise exception.InvalidParameter(
+ 'mac address %s format incorrect' % self.mac
+ )
+
+ @property
+ def patched_ipmi_credentials(self):
+ return self.ipmi_credentials
+
+ @patched_ipmi_credentials.setter
+ def patched_ipmi_credentials(self, value):
+ if not value:
+ return
+ ipmi_credentials = copy.deepcopy(self.ipmi_credentials)
+ self.ipmi_credentials = util.merge_dict(ipmi_credentials, value)
+
+ @property
+ def patched_tag(self):
+ return self.tag
+
+ @patched_tag.setter
+ def patched_tag(self, value):
+ if not value:
+ return
+ tag = copy.deepcopy(self.tag)
+ tag.update(value)
+ self.tag = tag
+
+ @property
+ def patched_location(self):
+ return self.location
+
+ @patched_location.setter
+ def patched_location(self, value):
+ if not value:
+ return
+ location = copy.deepcopy(self.location)
+ location.update(value)
+ self.location = location
+
+ def to_dict(self):
+ # TODO(xicheng): move the filling of switches
+ # to db/api.
+ dict_info = {}
+ dict_info['switches'] = [
+ {
+ 'switch_ip': switch_machine.switch_ip,
+ 'port': switch_machine.port,
+ 'vlans': switch_machine.vlans
+ }
+ for switch_machine in self.switch_machines
+ if not switch_machine.filtered
+ ]
+ if dict_info['switches']:
+ dict_info.update(dict_info['switches'][0])
+ dict_info.update(super(Machine, self).to_dict())
+ return dict_info
+
+
+class Switch(BASE, HelperMixin, TimestampMixin):
+ """Switch table."""
+ __tablename__ = 'switch'
+ id = Column(Integer, primary_key=True)
+ ip_int = Column('ip', BigInteger, unique=True, nullable=False)
+ credentials = Column(JSONEncoded, default={})
+ vendor = Column(String(256), nullable=True)
+ state = Column(Enum('initialized', 'unreachable', 'notsupported',
+ 'repolling', 'error', 'under_monitoring',
+ name='switch_state'),
+ ColumnDefault('initialized'))
+ # filters is a json formatted list; each element has the keys:
+ # ['filter_type', 'ports', 'port_prefix', 'port_suffix',
+ #  'port_start', 'port_end'].
+ # each port name is divided into <port_prefix><port_number><port_suffix>
+ # filter_type is one of ['allow', 'deny'], default is 'allow'.
+ # ports is a list of port names.
+ # port_prefix is the prefix that a filtered port should start with.
+ # port_suffix is the suffix that a filtered port should end with.
+ # port_start is the lowest port number the filter matches.
+ # port_end is the highest port number the filter matches.
+ _filters = Column('filters', JSONEncoded, default=[])
+ switch_machines = relationship(
+ SwitchMachine,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('switch')
+ )
+
+ def __str__(self):
+ return 'Switch[%s:%s]' % (self.id, self.ip)
+
+ @classmethod
+ def parse_filters(cls, filters):
+ """parse filters set from outside to standard format.
+
+ api can set switch filters with the flexible format, this
+ function will parse the flexible format filters.
+
+ Supported format:
+ as string:
+ allow ports ae10,ae20
+ allow port_prefix ae port_start 30 port_end 40
+ deny ports all
+ as python object:
+ [{
+ 'filter_type': 'allow',
+ 'ports': ['ae10', 'ae20']
+ },{
+ 'filter_type': 'allow',
+ 'port_prefix': 'ae',
+ 'port_suffix': '',
+ 'port_start': 30,
+ 'port_end': 40
+ },{
+ 'filter_type': 'deny',
+ 'ports': ['all']
+ }]
+ """
+ if isinstance(filters, basestring):
+ filters = filters.replace('\r\n', '\n').replace('\n', ';')
+ filters = [
+ machine_filter for machine_filter in filters.split(';')
+ if machine_filter
+ ]
+ if not isinstance(filters, list):
+ filters = [filters]
+ machine_filters = []
+ for machine_filter in filters:
+ if not machine_filter:
+ continue
+ if isinstance(machine_filter, basestring):
+ filter_dict = {}
+ filter_items = [
+ item for item in machine_filter.split() if item
+ ]
+ if filter_items[0] in ['allow', 'deny']:
+ filter_dict['filter_type'] = filter_items[0]
+ filter_items = filter_items[1:]
+ elif filter_items[0] not in [
+ 'ports', 'port_prefix', 'port_suffix',
+ 'port_start', 'port_end'
+ ]:
+ raise exception.InvalidParameter(
+ 'unrecognized filter type %s' % filter_items[0]
+ )
+ while filter_items:
+ if len(filter_items) >= 2:
+ filter_dict[filter_items[0]] = filter_items[1]
+ filter_items = filter_items[2:]
+ else:
+ filter_dict[filter_items[0]] = ''
+ filter_items = filter_items[1:]
+ machine_filter = filter_dict
+ if not isinstance(machine_filter, dict):
+ raise exception.InvalidParameter(
+ 'filter %s is not a dict' % machine_filter
+ )
+ if 'filter_type' in machine_filter:
+ if machine_filter['filter_type'] not in ['allow', 'deny']:
+ raise exception.InvalidParameter(
+ 'filter_type should be `allow` or `deny` in %s' % (
+ machine_filter
+ )
+ )
+ if 'ports' in machine_filter:
+ if isinstance(machine_filter['ports'], basestring):
+ machine_filter['ports'] = [
+ port_or_ports
+ for port_or_ports in machine_filter['ports'].split(',')
+ if port_or_ports
+ ]
+ if not isinstance(machine_filter['ports'], list):
+ raise exception.InvalidParameter(
+ '`ports` type is not list in filter %s' % (
+ machine_filter
+ )
+ )
+ for port_or_ports in machine_filter['ports']:
+ if not isinstance(port_or_ports, basestring):
+ raise exception.InvalidParameter(
+ '%s type is not basestring in `ports` %s' % (
+ port_or_ports, machine_filter['ports']
+ )
+ )
+ for key in ['port_start', 'port_end']:
+ if key in machine_filter:
+ if isinstance(machine_filter[key], basestring):
+ if machine_filter[key].isdigit():
+ machine_filter[key] = int(machine_filter[key])
+ if not isinstance(machine_filter[key], (int, long)):
+ raise exception.InvalidParameter(
+ '`%s` type is not int in filter %s' % (
+ key, machine_filter
+ )
+ )
+ machine_filters.append(machine_filter)
+ return machine_filters
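+
+ # Example (editor's sketch): parse_filters normalizes the string form
+ # into the dict form documented above, e.g.
+ #   Switch.parse_filters('allow ports ae10,ae20')
+ # returns
+ #   [{'filter_type': 'allow', 'ports': ['ae10', 'ae20']}]
+ # and digit-only values of port_start/port_end are cast to int.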
+
+ @classmethod
+ def format_filters(cls, filters):
+ """format json formatted filters to string."""
+ filter_strs = []
+ for machine_filter in filters:
+ filter_properties = []
+ filter_properties.append(
+ machine_filter.get('filter_type', 'allow')
+ )
+ if 'ports' in machine_filter:
+ filter_properties.append(
+ 'ports ' + ','.join(machine_filter['ports'])
+ )
+ if 'port_prefix' in machine_filter:
+ filter_properties.append(
+ 'port_prefix ' + machine_filter['port_prefix']
+ )
+ if 'port_suffix' in machine_filter:
+ filter_properties.append(
+ 'port_suffix ' + machine_filter['port_suffix']
+ )
+ if 'port_start' in machine_filter:
+ filter_properties.append(
+ 'port_start ' + str(machine_filter['port_start'])
+ )
+ if 'port_end' in machine_filter:
+ filter_properties.append(
+ 'port_end ' + str(machine_filter['port_end'])
+ )
+ filter_strs.append(' '.join(filter_properties))
+ return ';'.join(filter_strs)
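+
+ # Example (editor's sketch): format_filters renders the dict form back
+ # to the string form, e.g.
+ #   Switch.format_filters([{'filter_type': 'allow', 'port_prefix': 'ae',
+ #                           'port_start': 30, 'port_end': 40}])
+ # returns 'allow port_prefix ae port_start 30 port_end 40'.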
+
+ def __init__(self, ip_int, **kwargs):
+ self.ip_int = ip_int
+ super(Switch, self).__init__(**kwargs)
+
+ @property
+ def ip(self):
+ return str(netaddr.IPAddress(self.ip_int))
+
+ @ip.setter
+ def ip(self, ipaddr):
+ self.ip_int = int(netaddr.IPAddress(ipaddr))
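+
+ # Editor's note: the column stores the address as an integer, so
+ # switch.ip = '10.1.1.1' stores int(netaddr.IPAddress('10.1.1.1'))
+ # == 167837953, and the getter renders it back to '10.1.1.1'.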
+
+ @property
+ def patched_credentials(self):
+ return self.credentials
+
+ @patched_credentials.setter
+ def patched_credentials(self, value):
+ if not value:
+ return
+ credentials = copy.deepcopy(self.credentials)
+ self.credentials = util.merge_dict(credentials, value)
+
+ @property
+ def machine_filters(self):
+ return self._filters
+
+ @machine_filters.setter
+ def machine_filters(self, value):
+ if not value:
+ return
+ self._filters = self.parse_filters(value)
+
+ @property
+ def put_machine_filters(self):
+ return self._filters
+
+ @put_machine_filters.setter
+ def put_machine_filters(self, value):
+ if not value:
+ return
+ self._filters = self.parse_filters(value)
+
+ @property
+ def patched_machine_filters(self):
+ return self._filters
+
+ @patched_machine_filters.setter
+ def patched_machine_filters(self, value):
+ if not value:
+ return
+ filters = list(self.machine_filters)
+ self._filters = self.parse_filters(value) + filters
+
+ def to_dict(self):
+ dict_info = super(Switch, self).to_dict()
+ dict_info['ip'] = self.ip
+ dict_info['filters'] = self.format_filters(self._filters)
+ return dict_info
+
+
+class Subnet(BASE, TimestampMixin, HelperMixin):
+ """network table."""
+ __tablename__ = 'subnet'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True, nullable=True)
+ subnet = Column(String(80), unique=True, nullable=False)
+
+ host_networks = relationship(
+ HostNetwork,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('subnet')
+ )
+
+ def __init__(self, subnet, **kwargs):
+ self.subnet = subnet
+ super(Subnet, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'Subnet[%s:%s]' % (self.id, self.subnet)
+
+ def to_dict(self):
+ dict_info = super(Subnet, self).to_dict()
+ if not self.name:
+ dict_info['name'] = self.subnet
+ return dict_info
+
+
+# TODO(grace): move this global variable into HealthCheckReport.
+HEALTH_REPORT_STATES = ('verifying', 'success', 'finished', 'error')
+
+
+class HealthCheckReport(BASE, HelperMixin):
+ """Health check report table."""
+ __tablename__ = 'health_check_report'
+
+ cluster_id = Column(
+ Integer,
+ ForeignKey('cluster.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+ name = Column(String(80), nullable=False, primary_key=True)
+ display_name = Column(String(100))
+ report = Column(JSONEncoded, default={})
+ category = Column(String(80), default='')
+ state = Column(
+ Enum(*HEALTH_REPORT_STATES, name='report_state'),
+ ColumnDefault('verifying'),
+ nullable=False
+ )
+ error_message = Column(Text, default='')
+
+ def __init__(self, cluster_id, name, **kwargs):
+ self.cluster_id = cluster_id
+ self.name = name
+ if 'state' in kwargs and kwargs['state'] not in HEALTH_REPORT_STATES:
+ err_msg = 'State value %s is not accepted.' % kwargs['state']
+ raise exception.InvalidParameter(err_msg)
+
+ super(HealthCheckReport, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'HealthCheckReport[cluster_id: %s, name: %s]' % (
+ self.cluster_id, self.name
+ )
diff --git a/compass-deck/db/v1/model.py b/compass-deck/db/v1/model.py
new file mode 100644
index 0000000..d74e355
--- /dev/null
+++ b/compass-deck/db/v1/model.py
@@ -0,0 +1,724 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""database model."""
+from datetime import datetime
+from hashlib import md5
+import logging
+import simplejson as json
+import uuid
+
+from sqlalchemy import Column, ColumnDefault, Integer, String
+from sqlalchemy import Float, Enum, DateTime, ForeignKey, Text, Boolean
+from sqlalchemy import UniqueConstraint
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.ext.hybrid import hybrid_property
+
+from compass.utils import util
+
+from flask.ext.login import UserMixin
+from itsdangerous import URLSafeTimedSerializer
+
+BASE = declarative_base()
+# TODO(grace) SECRET_KEY should be generated when installing compass
+# and save to a config file or DB
+SECRET_KEY = "abcd"
+
+# This is used for generating a token by user's ID and
+# decode the ID from this token
+login_serializer = URLSafeTimedSerializer(SECRET_KEY)
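+
+# Editor's sketch (assuming itsdangerous' documented API): the serializer
+# round-trips the user id, e.g.
+#   token = login_serializer.dumps(user.id)
+#   user_id = login_serializer.loads(token)  # raises BadSignature if tampered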
+
+
+class User(BASE, UserMixin):
+ """User table."""
+ __tablename__ = 'user'
+ id = Column(Integer, primary_key=True)
+ email = Column(String(80), unique=True)
+ password = Column(String(225), default='')
+ active = Column(Boolean, default=True)
+
+ def __init__(self, email, password, **kwargs):
+ self.email = email
+ self.password = self._set_password(password)
+ super(User, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<User name: %s>' % self.email
+
+ def _set_password(self, password):
+ return self._hash_password(password)
+
+ def get_password(self):
+ return self.password
+
+ def valid_password(self, password):
+ return self.password == self._hash_password(password)
+
+ def get_auth_token(self):
+ return login_serializer.dumps(self.id)
+
+ def is_active(self):
+ return self.active
+
+ def _hash_password(self, password):
+ return md5(password).hexdigest()
+
+
+class SwitchConfig(BASE):
+ """Swtich Config table.
+
+ :param id: The unique identifier of the switch config.
+ :param ip: The IP address of the switch.
+ :param filter_port: The port of the switch which need to be filtered.
+ """
+ __tablename__ = 'switch_config'
+ id = Column(Integer, primary_key=True)
+ ip = Column(String(80))
+ filter_port = Column(String(16))
+ __table_args__ = (UniqueConstraint('ip', 'filter_port', name='filter1'), )
+
+ def __init__(self, **kwargs):
+ super(SwitchConfig, self).__init__(**kwargs)
+
+
+class Switch(BASE):
+ """Switch table.
+
+ :param id: the unique identifier of the switch. int as primary key.
+ :param ip: the IP address of the switch.
+ :param vendor_info: the name of the vendor
+ :param credential_data: used for accessing and retrieving information
+ from the switch. Store json format as string.
+ :param state: Enum. 'initialized/repolling': polling the switch has not
+ yet learned all MAC addresses of devices connected to it;
+ 'unreachable': one of the final states, indicates that the
+ switch is unreachable at this time, so no MAC address could be
+ retrieved from the switch.
+ 'notsupported': one of the final states, indicates that the
+ vendor found is not supported yet, so no MAC address will be
+ retrieved from the switch.
+ 'error': one of the final states, indicates that something
+ went wrong.
+ 'under_monitoring': one of the final states, indicates that
+ MAC addresses have been learned successfully from the switch.
+ :param err_msg: Error message when polling switch failed.
+ :param machines: refer to list of Machine connected to the switch.
+ """
+ __tablename__ = 'switch'
+
+ id = Column(Integer, primary_key=True)
+ ip = Column(String(80), unique=True)
+ credential_data = Column(Text)
+ vendor_info = Column(String(256), nullable=True)
+ state = Column(Enum('initialized', 'unreachable', 'notsupported',
+ 'repolling', 'error', 'under_monitoring',
+ name='switch_state'),
+ default='initialized')
+ err_msg = Column(Text)
+
+ def __init__(self, **kwargs):
+ super(Switch, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Switch ip: %r, credential: %r, vendor: %r, state: %s>'\
+ % (self.ip, self.credential, self.vendor, self.state)
+
+ @hybrid_property
+ def vendor(self):
+ """vendor property getter"""
+ return self.vendor_info
+
+ @vendor.setter
+ def vendor(self, value):
+ """vendor property setter"""
+ self.vendor_info = value
+
+ @property
+ def credential(self):
+ """credential data getter.
+
+ :returns: python primitive dictionary object.
+ """
+ if self.credential_data:
+ try:
+ credential = json.loads(self.credential_data)
+ return credential
+ except Exception as error:
+ logging.error('failed to load credential data %s: %s',
+ self.id, self.credential_data)
+ logging.exception(error)
+ raise error
+ else:
+ return {}
+
+ @credential.setter
+ def credential(self, value):
+ """credential property setter
+
+ :param value: dict of configuration data needed to update.
+ """
+ if value:
+ try:
+ credential = {}
+ if self.credential_data:
+ credential = json.loads(self.credential_data)
+
+ credential.update(value)
+ self.credential_data = json.dumps(credential)
+
+ except Exception as error:
+ logging.error('failed to dump credential data %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
+
+ else:
+ self.credential_data = json.dumps({})
+
+ logging.debug('switch now is %s', self)
+
+
+class Machine(BASE):
+ """Machine table.
+
+ .. note::
+ currently, we only take care of the management plane.
+ Therefore, we assume one machine is connected to one switch.
+
+ :param id: int, identity as primary key
+ :param mac: string, the MAC address of the machine.
+ :param switch_id: switch id that this machine connected on to.
+ :param port: nth port of the switch that this machine connected.
+ :param vlan: vlan id that this machine connected on to.
+ :param update_timestamp: last time this entry got updated.
+ :param switch: refer to the Switch the machine connects to.
+ """
+ __tablename__ = 'machine'
+
+ id = Column(Integer, primary_key=True)
+ mac = Column(String(24), default='')
+ port = Column(String(16), default='')
+ vlan = Column(Integer, default=0)
+ update_timestamp = Column(DateTime, default=datetime.now,
+ onupdate=datetime.now)
+ switch_id = Column(Integer, ForeignKey('switch.id',
+ onupdate='CASCADE',
+ ondelete='SET NULL'))
+ __table_args__ = (UniqueConstraint('mac', 'switch_id',
+ name='unique_machine'),)
+ switch = relationship('Switch', backref=backref('machines',
+ lazy='dynamic'))
+
+ def __init__(self, **kwargs):
+ super(Machine, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Machine %r: port=%r vlan=%r switch=%r>' % (
+ self.mac, self.port, self.vlan, self.switch)
+
+
+class HostState(BASE):
+ """The state of the ClusterHost.
+
+ :param id: int, identity as primary key.
+ :param state: Enum. 'UNINITIALIZED': the host is ready to be set up.
+ 'INSTALLING': the host is being installed.
+ 'READY': the host is set up.
+ 'ERROR': the host has an error.
+ :param progress: float, the installing progress from 0 to 1.
+ :param message: the latest installing message.
+ :param severity: Enum, the installing message severity.
+ ('INFO', 'WARNING', 'ERROR')
+ :param update_timestamp: the latest timestamp the entry got updated.
+ :param host: refer to ClusterHost.
+ :param os_progress: float, the installing progress of OS from 0 to 1.
+ """
+ __tablename__ = "host_state"
+
+ id = Column(Integer, ForeignKey('cluster_host.id',
+ onupdate='CASCADE',
+ ondelete='CASCADE'),
+ primary_key=True)
+ state = Column(Enum('UNINITIALIZED', 'INSTALLING', 'READY', 'ERROR'),
+ ColumnDefault('UNINITIALIZED'))
+ progress = Column(Float, ColumnDefault(0.0))
+ message = Column(Text)
+ severity = Column(Enum('INFO', 'WARNING', 'ERROR'), ColumnDefault('INFO'))
+ update_timestamp = Column(DateTime, default=datetime.now,
+ onupdate=datetime.now)
+ host = relationship('ClusterHost', backref=backref('state',
+ uselist=False))
+
+ os_progress = Column(Float, ColumnDefault(0.0))
+ os_message = Column(Text)
+ os_severity = Column(
+ Enum('INFO', 'WARNING', 'ERROR'),
+ ColumnDefault('INFO')
+ )
+ """
+ this is added by Lei for separating os and package progress purposes
+ os_state = Column(Enum('UNINITIALIZED', 'INSTALLING', 'OS_READY', 'ERROR'),
+ ColumnDefault('UNINITIALIZED'))
+ """
+
+ def __init__(self, **kwargs):
+ super(HostState, self).__init__(**kwargs)
+
+ @hybrid_property
+ def hostname(self):
+ """hostname getter"""
+ return self.host.hostname
+
+ @hybrid_property
+ def fullname(self):
+ """fullname getter"""
+ return self.host.fullname
+
+ def __repr__(self):
+ return (
+ '<HostState %r: state=%r, progress=%s, '
+ 'message=%s, severity=%s, os_progress=%s>'
+ ) % (
+ self.hostname, self.state, self.progress,
+ self.message, self.severity, self.os_progress
+ )
+
+
+class ClusterState(BASE):
+ """The state of the Cluster.
+
+ :param id: int, identity as primary key.
+ :param state: Enum, 'UNINITIALIZED': the cluster is ready to be set up.
+ 'INSTALLING': the cluster is being installed.
+ 'READY': the cluster is set up.
+ 'ERROR': the cluster has an error.
+ :param progress: float, the installing progress from 0 to 1.
+ :param message: the latest installing message.
+ :param severity: Enum, the installing message severity.
+ ('INFO', 'WARNING', 'ERROR').
+ :param update_timestamp: the latest timestamp the entry got updated.
+ :param cluster: refer to Cluster.
+ """
+ __tablename__ = 'cluster_state'
+ id = Column(Integer, ForeignKey('cluster.id',
+ onupdate='CASCADE',
+ ondelete='CASCADE'),
+ primary_key=True)
+ state = Column(Enum('UNINITIALIZED', 'INSTALLING', 'READY', 'ERROR'),
+ ColumnDefault('UNINITIALIZED'))
+ progress = Column(Float, ColumnDefault(0.0))
+ message = Column(Text)
+ severity = Column(Enum('INFO', 'WARNING', 'ERROR'), ColumnDefault('INFO'))
+ update_timestamp = Column(DateTime, default=datetime.now,
+ onupdate=datetime.now)
+ cluster = relationship('Cluster', backref=backref('state',
+ uselist=False))
+
+ def __init__(self, **kwargs):
+ super(ClusterState, self).__init__(**kwargs)
+
+ @hybrid_property
+ def clustername(self):
+ """clustername getter"""
+ return self.cluster.name
+
+ def __repr__(self):
+ return (
+ '<ClusterState %r: state=%r, progress=%s, '
+ 'message=%s, severity=%s>'
+ ) % (
+ self.clustername, self.state, self.progress,
+ self.message, self.severity
+ )
+
+
+class Cluster(BASE):
+ """Cluster configuration information.
+
+ :param id: int, identity as primary key.
+ :param name: str, cluster name.
+ :param mutable: bool, if the Cluster is mutable.
+ :param security_config: str stores json formatted security information.
+ :param networking_config: str stores json formatted networking information.
+ :param partition_config: str stores json formatted partition information.
+ :param adapter_id: the refer id in the Adapter table.
+ :param raw_config: str stores json formatted other cluster information.
+ :param adapter: refer to the Adapter.
+ :param state: refer to the ClusterState.
+ """
+ __tablename__ = 'cluster'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True)
+ mutable = Column(Boolean, default=True)
+ security_config = Column(Text)
+ networking_config = Column(Text)
+ partition_config = Column(Text)
+ adapter_id = Column(Integer, ForeignKey('adapter.id',
+ onupdate='CASCADE',
+ ondelete='SET NULL'),
+ nullable=True)
+ raw_config = Column(Text)
+ adapter = relationship("Adapter", backref=backref('clusters',
+ lazy='dynamic'))
+
+ def __init__(self, **kwargs):
+ if 'name' not in kwargs or not kwargs['name']:
+ kwargs['name'] = str(uuid.uuid4())
+
+ super(Cluster, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Cluster %r: config=%r>' % (self.name, self.config)
+
+ @property
+ def partition(self):
+ """partition getter"""
+ if self.partition_config:
+ try:
+ return json.loads(self.partition_config)
+ except Exception as error:
+ logging.error('failed to load partition config %s: %s',
+ self.id, self.partition_config)
+ logging.exception(error)
+ raise error
+ else:
+ return {}
+
+ @partition.setter
+ def partition(self, value):
+ """partition setter"""
+ logging.debug('cluster %s set partition %s', self.id, value)
+ if value:
+ try:
+ self.partition_config = json.dumps(value)
+ except Exception as error:
+ logging.error('failed to dump partition config %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
+ else:
+ self.partition_config = None
+
+ @property
+ def security(self):
+ """security getter"""
+ if self.security_config:
+ try:
+ return json.loads(self.security_config)
+ except Exception as error:
+ logging.error('failed to load security config %s: %s',
+ self.id, self.security_config)
+ logging.exception(error)
+ raise error
+ else:
+ return {}
+
+ @security.setter
+ def security(self, value):
+ """security setter"""
+ logging.debug('cluster %s set security %s', self.id, value)
+ if value:
+ try:
+ self.security_config = json.dumps(value)
+ except Exception as error:
+ logging.error('failed to dump security config %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
+ else:
+ self.security_config = None
+
+ @property
+ def networking(self):
+ """networking getter"""
+ if self.networking_config:
+ try:
+ return json.loads(self.networking_config)
+ except Exception as error:
+ logging.error('failed to load networking config %s: %s',
+ self.id, self.networking_config)
+ logging.exception(error)
+ raise error
+ else:
+ return {}
+
+ @networking.setter
+ def networking(self, value):
+ """networking setter."""
+ logging.debug('cluster %s set networking %s', self.id, value)
+ if value:
+ try:
+ self.networking_config = json.dumps(value)
+ except Exception as error:
+ logging.error('failed to dump networking config %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
+ else:
+ self.networking_config = None
+
+ @hybrid_property
+ def config(self):
+ """get config from security, networking, partition."""
+ config = {}
+ if self.raw_config:
+ try:
+ config = json.loads(self.raw_config)
+ except Exception as error:
+ logging.error('failed to load raw config %s: %s',
+ self.id, self.raw_config)
+ logging.exception(error)
+ raise error
+
+ util.merge_dict(config, {'security': self.security})
+ util.merge_dict(config, {'networking': self.networking})
+ util.merge_dict(config, {'partition': self.partition})
+ util.merge_dict(config, {'clusterid': self.id,
+ 'clustername': self.name})
+ return config
+
+ @config.setter
+ def config(self, value):
+ """set config to security, networking, partition."""
+ logging.debug('cluster %s set config %s', self.id, value)
+ if not value:
+ self.security = None
+ self.networking = None
+ self.partition = None
+ self.raw_config = None
+ return
+
+ self.security = value.get('security')
+ self.networking = value.get('networking')
+ self.partition = value.get('partition')
+
+ try:
+ self.raw_config = json.dumps(value)
+ except Exception as error:
+ logging.error('failed to dump raw config %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
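+
+ # Example (editor's sketch): assigning
+ #   cluster.config = {'security': {'a': 1}, 'networking': {'b': 2}}
+ # stores each section in its own *_config column plus raw_config, and
+ # reading cluster.config merges the sections back together with
+ # 'clusterid' and 'clustername' taken from the row itself.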
+
+
+class ClusterHost(BASE):
+ """ClusterHost information.
+
+ :param id: int, identity as primary key.
+ :param machine_id: int, the id of the Machine.
+ :param cluster_id: int, the id of the Cluster.
+ :param mutable: if the ClusterHost information is mutable.
+ :param hostname: str, host name.
+ :param config_data: string, json formatted config data.
+ :param cluster: refer to Cluster the host in.
+ :param machine: refer to the Machine the host on.
+ :param state: refer to HostState indicates the host state.
+ """
+ __tablename__ = 'cluster_host'
+
+ id = Column(Integer, primary_key=True)
+
+ machine_id = Column(Integer, ForeignKey('machine.id',
+ onupdate='CASCADE',
+ ondelete='CASCADE'),
+ nullable=True, unique=True)
+
+ cluster_id = Column(Integer, ForeignKey('cluster.id',
+ onupdate='CASCADE',
+ ondelete='SET NULL'),
+ nullable=True)
+
+ hostname = Column(String(80))
+ config_data = Column(Text)
+ mutable = Column(Boolean, default=True)
+ __table_args__ = (UniqueConstraint('cluster_id', 'hostname',
+ name='unique_host'),)
+
+ cluster = relationship("Cluster",
+ backref=backref('hosts', lazy='dynamic'))
+ machine = relationship("Machine",
+ backref=backref('host', uselist=False))
+
+ def __init__(self, **kwargs):
+ if 'hostname' not in kwargs or not kwargs['hostname']:
+ kwargs['hostname'] = str(uuid.uuid4())
+
+ super(ClusterHost, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<ClusterHost %r: cluster=%r machine=%r>' % (
+ self.hostname, self.cluster, self.machine)
+
+ @hybrid_property
+ def fullname(self):
+ return '%s.%s' % (self.hostname, self.cluster.id)
+
+ @property
+ def config(self):
+ """config getter."""
+ config = {}
+ try:
+ if self.config_data:
+ config.update(json.loads(self.config_data))
+
+ config.update({
+ 'hostid': self.id,
+ 'hostname': self.hostname,
+ })
+ if self.cluster:
+ config.update({
+ 'clusterid': self.cluster.id,
+ 'clustername': self.cluster.name,
+ 'fullname': self.fullname,
+ })
+
+ if self.machine:
+ util.merge_dict(
+ config, {
+ 'networking': {
+ 'interfaces': {
+ 'management': {
+ 'mac': self.machine.mac
+ }
+ }
+ },
+ 'switch_port': self.machine.port,
+ 'vlan': self.machine.vlan,
+ })
+ if self.machine.switch:
+ util.merge_dict(
+ config, {'switch_ip': self.machine.switch.ip})
+
+ except Exception as error:
+ logging.error('failed to load config %s: %s',
+ self.hostname, self.config_data)
+ logging.exception(error)
+ raise error
+
+ return config
+
+ @config.setter
+ def config(self, value):
+ """config setter"""
+ if not self.config_data:
+ config = {
+ }
+ self.config_data = json.dumps(config)
+
+ if value:
+ try:
+ config = json.loads(self.config_data)
+ util.merge_dict(config, value)
+
+ self.config_data = json.dumps(config)
+ except Exception as error:
+ logging.error('failed to dump config %s: %s',
+ self.hostname, value)
+ logging.exception(error)
+ raise error
+
+
+class LogProgressingHistory(BASE):
+ """host installing log history for each file.
+
+ :param id: int, identity as primary key.
+ :param pathname: str, the full path of the installing log file. unique.
+ :param position: int, the position of the log file it has processed.
+ :param partial_line: str, partial line of the log.
+ :param progress: float, the installing progress between 0 and 1.
+ :param message: str, the installing message.
+ :param severity: Enum, the installing message severity.
+ ('ERROR', 'WARNING', 'INFO')
+ :param line_matcher_name: str, the line matcher name of the log processor.
+ :param update_timestamp: datetime, the latest timestamp the entry updated.
+ """
+ __tablename__ = 'log_progressing_history'
+ id = Column(Integer, primary_key=True)
+ pathname = Column(String(80), unique=True)
+ position = Column(Integer, ColumnDefault(0))
+ partial_line = Column(Text)
+ progress = Column(Float, ColumnDefault(0.0))
+ message = Column(Text)
+ severity = Column(Enum('ERROR', 'WARNING', 'INFO'), ColumnDefault('INFO'))
+ line_matcher_name = Column(String(80), ColumnDefault('start'))
+ update_timestamp = Column(DateTime, default=datetime.now,
+ onupdate=datetime.now)
+
+ def __init__(self, **kwargs):
+ super(LogProgressingHistory, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return (
+ 'LogProgressingHistory[%r: position %r,'
+ 'partial_line %r,progress %r,message %r,'
+ 'severity %r]'
+ ) % (
+ self.pathname, self.position,
+ self.partial_line,
+ self.progress,
+ self.message,
+ self.severity
+ )
+
+
+class Adapter(BASE):
+ """Table stores ClusterHost installing Adapter information.
+
+ :param id: int, identity as primary key.
+ :param name: string, adapter name, unique.
+ :param os: string, os name for installing the host.
+ :param target_system: string, target system to be installed on the host.
+ :param clusters: refer to the list of Cluster.
+ """
+ __tablename__ = 'adapter'
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True)
+ os = Column(String(80))
+ target_system = Column(String(80))
+ __table_args__ = (
+ UniqueConstraint('os', 'target_system', name='unique_adapter'),)
+
+ def __init__(self, **kwargs):
+ super(Adapter, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Adapter %r: os %r, target_system %r>' % (
+ self.name, self.os, self.target_system
+ )
+
+
+class Role(BASE):
+ """The Role table stores avaiable roles of one target system.
+
+ .. note::
+ the host can be deployed to one or several roles in the cluster.
+
+ :param id: int, identity as primary key.
+ :param name: role name.
+ :param target_system: str, the target_system.
+ :param description: str, the description of the role.
+ """
+ __tablename__ = 'role'
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True)
+ target_system = Column(String(80))
+ description = Column(Text)
+
+ def __init__(self, **kwargs):
+ super(Role, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Role %r: target_system %r, description: %r>' % (
+ self.name, self.target_system, self.description)
diff --git a/compass-deck/db/validator.py b/compass-deck/db/validator.py
new file mode 100644
index 0000000..730bb52
--- /dev/null
+++ b/compass-deck/db/validator.py
@@ -0,0 +1,195 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Validator methods."""
+import logging
+import netaddr
+import re
+import socket
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+def is_valid_ip(name, ip_addr, **kwargs):
+ """Valid the format of an IP address."""
+ if isinstance(ip_addr, list):
+ return all([
+ is_valid_ip(name, item, **kwargs) for item in ip_addr
+ ])
+ try:
+ netaddr.IPAddress(ip_addr)
+ except Exception:
+ logging.debug('%s invalid ip addr %s', name, ip_addr)
+ return False
+ return True
+
+
+def is_valid_network(name, ip_network, **kwargs):
+ """Valid the format of an Ip network."""
+ if isinstance(ip_network, list):
+ return all([
+ is_valid_network(name, item, **kwargs) for item in ip_network
+ ])
+ try:
+ netaddr.IPNetwork(ip_network)
+ except Exception:
+ logging.debug('%s invalid network %s', name, ip_network)
+ return False
+ return True
+
+
+def is_valid_netmask(name, ip_addr, **kwargs):
+ """Valid the format of a netmask."""
+ if isinstance(ip_addr, list):
+ return all([
+ is_valid_netmask(name, item, **kwargs) for item in ip_addr
+ ])
+ if not is_valid_ip(name, ip_addr):
+ return False
+ ip = netaddr.IPAddress(ip_addr)
+ if ip.is_netmask():
+ return True
+ logging.debug('%s invalid netmask %s', name, ip_addr)
+ return False
+
+
+def is_valid_gateway(name, ip_addr, **kwargs):
+ """Valid the format of gateway."""
+ if isinstance(ip_addr, list):
+ return all([
+ is_valid_gateway(name, item, **kwargs) for item in ip_addr
+ ])
+ if not is_valid_ip(name, ip_addr):
+ return False
+ ip = netaddr.IPAddress(ip_addr)
+ if ip.is_private() or ip.is_public():
+ return True
+ logging.debug('%s invalid gateway %s', name, ip_addr)
+ return False
+
+
+def is_valid_dns(name, dns, **kwargs):
+ """Valid the format of DNS."""
+ if isinstance(dns, list):
+ return all([is_valid_dns(name, item, **kwargs) for item in dns])
+ if is_valid_ip(name, dns):
+ return True
+ try:
+ socket.gethostbyname_ex(dns)
+ except Exception:
+ logging.debug('%s invalid dns name %s', name, dns)
+ return False
+ return True
+
+
+def is_valid_url(name, url, **kwargs):
+ """Valid the format of url."""
+ if isinstance(url, list):
+ return all([
+ is_valid_url(name, item, **kwargs) for item in url
+ ])
+ if re.match(
+ r'^(http|https|ftp)://([0-9A-Za-z_-]+)(\.[0-9a-zA-Z_-]+)*'
+ r'(:\d+)?(/[0-9a-zA-Z_-]+)*$',
+ url
+ ):
+ return True
+ logging.debug(
+ '%s invalid url %s', name, url
+ )
+ return False
+
+
+def is_valid_domain(name, domain, **kwargs):
+ """Validate the format of domain."""
+ if isinstance(domain, list):
+ return all([
+ is_valid_domain(name, item, **kwargs) for item in domain
+ ])
+ if re.match(
+ r'^([0-9a-zA-Z_-]+)(\.[0-9a-zA-Z_-]+)*$',
+ domain
+ ):
+ return True
+ logging.debug(
+ '%s invalid domain %s', name, domain
+ )
+ return False
+
+
+def is_valid_username(name, username, **kwargs):
+ """Valid the format of username."""
+ if bool(username):
+ return True
+ logging.debug(
+ '%s username is empty', name
+ )
+
+
+def is_valid_password(name, password, **kwargs):
+ """Valid the format of password."""
+ if bool(password):
+ return True
+ logging.debug('%s password is empty', name)
+ return False
+
+
+def is_valid_partition(name, partition, **kwargs):
+ """Valid the format of partition name."""
+ if name != 'swap' and not name.startswith('/'):
+ logging.debug(
+ '%s is not started with / or swap', name
+ )
+ return False
+ if 'size' not in partition and 'percentage' not in partition:
+ logging.debug(
+ '%s partition does not contain sie or percentage',
+ name
+ )
+ return False
+ return True
+
+
+def is_valid_percentage(name, percentage, **kwargs):
+ """Valid the percentage."""
+ if 0 <= percentage <= 100:
+ return True
+ logging.debug('%s invalid percentage %s', name, percentage)
+
+
+def is_valid_port(name, port, **kwargs):
+ """Valid the format of port."""
+ if 0 < port < 65536:
+ return True
+ logging.debug('%s invalid port %s', name, port)
+
+
+def is_valid_size(name, size, **kwargs):
+ """Validate the format of a size string such as 512M or 10G."""
+ if re.match(r'^(\d+)(K|M|G|T)$', size):
+ return True
+ logging.debug('%s invalid size %s', name, size)
+ return False
+
+
+VALIDATOR_GLOBALS = globals()
+VALIDATOR_LOCALS = locals()
+VALIDATOR_CONFIGS = util.load_configs(
+ setting.VALIDATOR_DIR,
+ config_name_suffix='.py',
+ env_globals=VALIDATOR_GLOBALS,
+ env_locals=VALIDATOR_LOCALS
+)
+for validator_config in VALIDATOR_CONFIGS:
+ VALIDATOR_LOCALS.update(validator_config)
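+
+# Example (editor's sketch): each validator takes the config key name, the
+# value and optional context, and returns a boolean, e.g.
+#   is_valid_ip('dns', '8.8.8.8')                  # True
+#   is_valid_netmask('netmask', '255.255.255.0')   # True
+#   is_valid_port('ssh_port', 22)                  # True
+# The load_configs() call above then merges any extra validators found in
+# setting.VALIDATOR_DIR into this module's namespace.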
diff --git a/compass-deck/deployment/__init__.py b/compass-deck/deployment/__init__.py
new file mode 100644
index 0000000..cbd36e0
--- /dev/null
+++ b/compass-deck/deployment/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
diff --git a/compass-deck/deployment/deploy_manager.py b/compass-deck/deployment/deploy_manager.py
new file mode 100644
index 0000000..baf7cd6
--- /dev/null
+++ b/compass-deck/deployment/deploy_manager.py
@@ -0,0 +1,237 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
+
+"""Module to get configs from provider and isntallers and update
+ them to provider and installers.
+"""
+from compass.deployment.installers.installer import OSInstaller
+from compass.deployment.installers.installer import PKInstaller
+from compass.deployment.utils import constants as const
+from compass.utils import util
+
+
+import logging
+
+
+class DeployManager(object):
+ """Deploy manager module."""
+ def __init__(self, adapter_info, cluster_info, hosts_info):
+ """Init deploy manager."""
+ self.os_installer = None
+ self.pk_installer = None
+
+ # Get OS installer
+ os_installer_name = adapter_info[const.OS_INSTALLER][const.NAME]
+ self.os_installer = DeployManager._get_installer(OSInstaller,
+ os_installer_name,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ # Get package installer
+ pk_info = adapter_info.setdefault(const.PK_INSTALLER, {})
+ if pk_info:
+ pk_installer_name = pk_info[const.NAME]
+ self.pk_installer = DeployManager._get_installer(PKInstaller,
+ pk_installer_name,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ @staticmethod
+ def _get_installer(installer_type, name, adapter_info, cluster_info,
+ hosts_info):
+ """Get installer instance."""
+ callback = getattr(installer_type, 'get_installer')
+ installer = callback(name, adapter_info, cluster_info, hosts_info)
+
+ return installer
+
+ def deploy(self):
+ """Deploy the cluster."""
+ deployed_config = self.deploy_os()
+ package_deployed_config = self.deploy_target_system()
+
+ util.merge_dict(deployed_config, package_deployed_config)
+
+ return deployed_config
+
+ def check_cluster_health(self, callback_url):
+        logging.info("DeployManager check_cluster_health...")
+ self.pk_installer.check_cluster_health(callback_url)
+
+ def clean_progress(self):
+ """Clean previous installation log and progress."""
+        self.clean_os_installation_progress()
+ self.clean_package_installation_progress()
+
+    def clean_os_installation_progress(self):
+ # OS installer cleans previous installing progress.
+ if self.os_installer:
+ self.os_installer.clean_progress()
+
+ def clean_package_installation_progress(self):
+ # Package installer cleans previous installing progress.
+ if self.pk_installer:
+ self.pk_installer.clean_progress()
+
+ def prepare_for_deploy(self):
+ self.clean_progress()
+
+ def deploy_os(self):
+        """Deploy OS to the hosts in the cluster that need it.
+
+ Return OS deployed config.
+ """
+ if not self.os_installer:
+ return {}
+
+ pk_installer_config = {}
+ if self.pk_installer:
+ # generate target system config which will be installed by OS
+ # installer right after OS installation is completed.
+ pk_installer_config = self.pk_installer.generate_installer_config()
+ logging.debug('[DeployManager]package installer config is %s',
+ pk_installer_config)
+
+ # Send package installer config info to OS installer.
+ self.os_installer.set_package_installer_config(pk_installer_config)
+
+ # start to deploy OS
+ return self.os_installer.deploy()
+
+ def deploy_target_system(self):
+ """Deploy target system to all hosts in the cluster.
+
+ Return package deployed config.
+ """
+ if not self.pk_installer:
+ return {}
+
+ return self.pk_installer.deploy()
+
+ def redeploy_os(self):
+ """Redeploy OS for this cluster without changing configurations."""
+ if not self.os_installer:
+ logging.info("Redeploy_os: No OS installer found!")
+ return
+
+        logging.info("Start to redeploy OS for cluster.")
+        self.os_installer.redeploy()
+
+ def redeploy_target_system(self):
+ """Redeploy target system for the cluster without changing config."""
+ if not self.pk_installer:
+ logging.info("Redeploy_target_system: No installer found!")
+ return
+
+        logging.info("Start to redeploy target system.")
+        self.pk_installer.deploy()
+
+ def redeploy(self):
+ """Redeploy the cluster without changing configurations."""
+ self.redeploy_os()
+ self.redeploy_target_system()
+
+ def remove_hosts(self, package_only=False, delete_cluster=False):
+        """Remove hosts from the OS and/or package installer server side."""
+ if self.os_installer and not package_only:
+ self.os_installer.delete_hosts()
+
+ if self.pk_installer:
+ self.pk_installer.delete_hosts(delete_cluster=delete_cluster)
+
+ def os_installed(self):
+ if self.os_installer:
+ self.os_installer.ready()
+ if self.pk_installer:
+ self.pk_installer.os_ready()
+
+ def cluster_os_installed(self):
+ if self.os_installer:
+ self.os_installer.cluster_ready()
+ if self.pk_installer:
+ self.pk_installer.cluster_os_ready()
+
+ def package_installed(self):
+ if self.pk_installer:
+ self.pk_installer.ready()
+
+ def cluster_installed(self):
+ if self.pk_installer:
+ self.pk_installer.cluster_ready()
+
+
+class Patcher(DeployManager):
+ """Patcher Module."""
+ def __init__(self, adapter_info, cluster_info, hosts_info, cluster_hosts):
+ self.pk_installer = None
+ self.cluster_info = cluster_info
+ registered_roles = cluster_info['flavor']['roles']
+
+ pk_info = adapter_info.setdefault(const.PK_INSTALLER, {})
+ if pk_info:
+ pk_installer_name = pk_info[const.NAME]
+ self.pk_installer = Patcher._get_installer(PKInstaller,
+ pk_installer_name,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ patched_role_mapping = {}
+ for role in registered_roles:
+ patched_role_mapping[role] = []
+ for host in cluster_hosts:
+ if len(host['patched_roles']) == 0:
+ continue
+ for role in host['patched_roles']:
+ patched_role_mapping[role['name']].append(host)
+ self.patched_role_mapping = patched_role_mapping
+
+ def patch(self):
+ patched_config = self.pk_installer.patch(self.patched_role_mapping)
+
+ return patched_config
+
+
+class PowerManager(object):
+ """Manage host to power on, power off, and reset."""
+
+ def __init__(self, adapter_info, cluster_info, hosts_info):
+ os_installer_name = adapter_info[const.OS_INSTALLER][const.NAME]
+ self.os_installer = DeployManager._get_installer(OSInstaller,
+ os_installer_name,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ def poweron(self):
+ if not self.os_installer:
+ logging.info("No OS installer found, cannot power on machine!")
+ return
+ self.os_installer.poweron()
+
+ def poweroff(self):
+ if not self.os_installer:
+            logging.info("No OS installer found, cannot power off machine!")
+ return
+ self.os_installer.poweroff()
+
+ def reset(self):
+ if not self.os_installer:
+            logging.info("No OS installer found, cannot reset machine!")
+ return
+ self.os_installer.reset()
diff --git a/compass-deck/deployment/installers/__init__.py b/compass-deck/deployment/installers/__init__.py
new file mode 100644
index 0000000..0296be5
--- /dev/null
+++ b/compass-deck/deployment/installers/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
+
+
+"""modules to read/write cluster/host config from installers.
+
+ .. moduleauthor:: Grace Yu <grace.yu@huawei.com>
+"""
diff --git a/compass-deck/deployment/installers/config_manager.py b/compass-deck/deployment/installers/config_manager.py
new file mode 100644
index 0000000..597c3a6
--- /dev/null
+++ b/compass-deck/deployment/installers/config_manager.py
@@ -0,0 +1,527 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "baigk baiguoku@huawei.com)"
+
+from collections import defaultdict
+from copy import deepcopy
+import json
+import logging
+import netaddr
+
+from compass.deployment.utils import constants as const
+
+ip_generator_map = {}
+
+
+def get_ip_addr(ip_ranges):
+ def _get_ip_addr():
+ for ip_range in ip_ranges:
+ for ip in netaddr.iter_iprange(*ip_range):
+ yield str(ip)
+
+    s = json.dumps(ip_ranges)
+    if s not in ip_generator_map:
+        ip_generator_map[s] = _get_ip_addr()
+    return ip_generator_map[s]
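+# A minimal illustration of the caching behavior (hypothetical ranges):
+# repeated calls with equal ip_ranges share one generator, so each call
+# site consumes successive addresses from the same pool:
+#   gen = get_ip_addr([('10.1.0.1', '10.1.0.3')])
+#   gen.next()                                      # '10.1.0.1'
+#   get_ip_addr([('10.1.0.1', '10.1.0.3')]).next()  # '10.1.0.2'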
+
+
+class AdapterInfo(object):
+ def __init__(self, adapter_info):
+ self.adapter_info = adapter_info
+ self.name = self.adapter_info.get(const.NAME)
+ self.dist_system_name = self.name
+ self.health_check_cmd = self.adapter_info.get(const.HEALTH_CHECK_CMD)
+
+ self.os_installer = self.adapter_info.setdefault(
+ const.OS_INSTALLER, {}
+ )
+ self.os_installer.setdefault(const.INSTALLER_SETTINGS, {})
+
+ self.package_installer = self.adapter_info.setdefault(
+ const.PK_INSTALLER, {}
+ )
+ self.package_installer.setdefault(const.INSTALLER_SETTINGS, {})
+
+ self.metadata = self.adapter_info.setdefault(const.METADATA, {})
+ self.os_metadata = self.metadata.setdefault(const.OS_CONFIG, {})
+ self.package_metadata = self.metadata.setdefault(const.PK_CONFIG, {})
+
+ self.flavors = dict([(f[const.FLAVOR_NAME], f)
+ for f in self.adapter_info.get(const.FLAVOR, [])])
+
+ @property
+ def flavor_list(self):
+ return self.flavors.values()
+
+ def get_flavor(self, flavor_name):
+ return self.flavors.get(flavor_name)
+
+
+class ClusterInfo(object):
+ def __init__(self, cluster_info):
+ self.cluster_info = cluster_info
+ self.id = self.cluster_info.get(const.ID)
+ self.name = self.cluster_info.get(const.NAME)
+ self.os_version = self.cluster_info.get(const.OS_VERSION)
+ self.flavor = self.cluster_info.setdefault(
+ const.FLAVOR, {}
+ )
+ self.os_config = self.cluster_info.setdefault(
+ const.OS_CONFIG, {}
+ )
+ self.package_config = self.cluster_info.setdefault(
+ const.PK_CONFIG, {}
+ )
+ self.deployed_os_config = self.cluster_info.setdefault(
+ const.DEPLOYED_OS_CONFIG, {}
+ )
+ self.deployed_package_config = self.cluster_info.setdefault(
+ const.DEPLOYED_PK_CONFIG, {}
+ )
+ self.network_mapping = self.package_config.setdefault(
+ const.NETWORK_MAPPING, {}
+ )
+
+ os_config_general = self.os_config.setdefault(
+ const.OS_CONFIG_GENERAL, {}
+ )
+ self.domain = os_config_general.setdefault(const.DOMAIN, None)
+ self.hosts = []
+
+ def add_host(self, host):
+ self.hosts.append(host)
+
+ @property
+ def roles_mapping(self):
+ deploy_config = self.deployed_package_config
+ return deploy_config.setdefault(
+ const.ROLES_MAPPING, self._get_cluster_roles_mapping()
+ )
+
+ def _get_cluster_roles_mapping(self):
+        """The output format will be as below, for example:
+
+ {
+ "controller": [{
+ "hostname": "xxx",
+ "management": {
+ "interface": "eth0",
+ "ip": "192.168.1.10",
+ "netmask": "255.255.255.0",
+ "subnet": "192.168.1.0/24",
+ "is_mgmt": True,
+ "is_promiscuous": False
+ },
+ ...
+ }],
+ ...
+ }
+ """
+ mapping = defaultdict(list)
+ for host in self.hosts:
+ for role, value in host.roles_mapping.iteritems():
+ mapping[role].append(value)
+
+ return dict(mapping)
+
+ def _get_cluster_patched_roles_mapping(self):
+ mapping = defaultdict(list)
+ for host in self.hosts:
+ for role, value in host.patched_roles_mapping.iteritems():
+ mapping[role].append(value)
+
+ return dict(mapping)
+
+ @property
+ def base_info(self):
+ return {
+ const.ID: self.id,
+ const.NAME: self.name,
+ const.OS_VERSION: self.os_version
+ }
+
+
+class HostInfo(object):
+ def __init__(self, host_info, cluster_info):
+ self.host_info = host_info
+ self.cluster_info = cluster_info
+ self.id = self.host_info.get(const.ID)
+ self.name = self.host_info.get(const.NAME)
+ self.mac = self.host_info.get(const.MAC_ADDR)
+ self.hostname = self.host_info.get(const.HOSTNAME)
+ self.networks = self.host_info.setdefault(const.NETWORKS, {})
+ self.os_config = self.host_info.setdefault(const.OS_CONFIG, {})
+
+ self.package_config = self.host_info.setdefault(const.PK_CONFIG, {})
+ self.roles = self.host_info.setdefault(const.ROLES, [])
+ self.patched_roles = self.host_info.setdefault(const.PATCHED_ROLES, [])
+ self.ipmi = deepcopy(self.host_info.setdefault(const.IPMI, {}))
+ self.reinstall_os_flag = self.host_info.get(const.REINSTALL_OS_FLAG)
+ self.deployed_os_config = self.host_info.setdefault(
+ const.DEPLOYED_OS_CONFIG, {}
+ )
+ self.deployed_package_config = self.host_info.setdefault(
+ const.DEPLOYED_PK_CONFIG, {}
+ )
+
+ os_general_config = self.os_config.setdefault(
+ const.OS_CONFIG_GENERAL, {}
+ )
+ domain = os_general_config.setdefault(const.DOMAIN, None)
+ if domain is None:
+ self.domain = self.cluster_info.domain
+ else:
+ self.domain = domain
+
+ if const.DNS in host_info:
+ self.dns = host_info[const.DNS]
+ else:
+ self.dns = '.'.join((self.hostname, self.domain))
+
+ if const.NETWORK_MAPPING not in self.package_config:
+ self.network_mapping = self.cluster_info.network_mapping
+ else:
+ self.network_mapping = self.package_config[const.NETWORK_MAPPING]
+
+ if const.ROLES_MAPPING not in self.deployed_package_config:
+ self.roles_mapping = self._get_host_roles_mapping()
+ self.deployed_package_config[
+ const.ROLES_MAPPING
+ ] = self.roles_mapping
+ else:
+ self.roles_mapping = \
+ self.deployed_package_config[const.ROLES_MAPPING]
+
+ self.patched_roles_mapping = self._get_host_patched_roles_mapping()
+
+ self.cluster_info.add_host(self)
+
+ def valid_interface(self, interface):
+ if interface not in self.networks:
+ raise RuntimeError("interface %s is invalid" % interface)
+
+ def get_interface(self, interface):
+ self.valid_interface(interface)
+ return self.networks[interface]
+
+ def get_interface_ip(self, interface):
+ return self.get_interface(interface).get(const.IP_ADDR)
+
+ def get_interface_netmask(self, interface):
+ return self.get_interface(interface).get(const.NETMASK)
+
+ def get_interface_subnet(self, interface):
+ return self.get_interface(interface).get(const.SUBNET)
+
+ def is_interface_promiscuous(self, interface):
+ return self.get_interface(interface).get(const.PROMISCUOUS_FLAG)
+
+ def is_interface_mgmt(self, interface):
+ return self.get_interface(interface).get(const.MGMT_NIC_FLAG)
+
+ def _get_host_roles_mapping(self):
+ if not self.network_mapping:
+ return {}
+
+ net_info = {const.HOSTNAME: self.hostname}
+ for k, v in self.network_mapping.items():
+ try:
+ net_info[k] = self.networks[v[const.NIC]]
+ net_info[k][const.NIC] = v[const.NIC]
+ except Exception:
+ pass
+
+ mapping = {}
+ for role in self.roles:
+ role = role.replace("-", "_")
+ mapping[role] = net_info
+
+ return mapping
+
+ def _get_host_patched_roles_mapping(self):
+ if not self.network_mapping:
+ return {}
+
+ net_info = {const.HOSTNAME: self.hostname}
+ for k, v in self.network_mapping.items():
+ try:
+ net_info[k] = self.networks[v[const.NIC]]
+ net_info[k][const.NIC] = v[const.NIC]
+ except Exception:
+ pass
+
+ mapping = {}
+ for role in self.patched_roles:
+ role = role['name'].replace("-", "_")
+ mapping[role] = net_info
+
+ return mapping
+
+ @property
+ def baseinfo(self):
+ return {
+ const.REINSTALL_OS_FLAG: self.reinstall_os_flag,
+ const.MAC_ADDR: self.mac,
+ const.NAME: self.name,
+ const.HOSTNAME: self.hostname,
+ const.DNS: self.dns,
+ const.NETWORKS: deepcopy(self.networks)
+ }
+
+
+class BaseConfigManager(object):
+    def __init__(self, adapter_info=None, cluster_info=None, hosts_info=None):
+ assert(adapter_info and isinstance(adapter_info, dict))
+ assert(cluster_info and isinstance(cluster_info, dict))
+ assert(hosts_info and isinstance(hosts_info, dict))
+
+ self.adapter_info = AdapterInfo(adapter_info)
+ self.cluster_info = ClusterInfo(cluster_info)
+ self.hosts_info = dict([(k, HostInfo(v, self.cluster_info))
+ for k, v in hosts_info.iteritems()])
+
+ def get_adapter_name(self):
+ return self.adapter_info.name
+
+ def get_dist_system_name(self):
+ return self.adapter_info.dist_system_name
+
+ def get_adapter_health_check_cmd(self):
+ return self.adapter_info.health_check_cmd
+
+ def get_os_installer_settings(self):
+ return self.adapter_info.os_installer[const.INSTALLER_SETTINGS]
+
+ def get_pk_installer_settings(self):
+ return self.adapter_info.package_installer[const.INSTALLER_SETTINGS]
+
+ def get_os_config_metadata(self):
+ return self.adapter_info.metadata[const.OS_CONFIG]
+
+    def get_pk_config_metadata(self):
+ return self.adapter_info.metadata[const.PK_CONFIG]
+
+ def get_adapter_all_flavors(self):
+ return self.adapter_info.flavor_list
+
+ def get_adapter_flavor(self, flavor_name):
+ return self.adapter_info.get_flavor(flavor_name)
+
+ def get_cluster_id(self):
+ return self.cluster_info.id
+
+ def get_clustername(self):
+ return self.cluster_info.name
+
+ def get_os_version(self):
+ return self.cluster_info.os_version
+
+ def get_cluster_os_config(self):
+ return self.cluster_info.os_config
+
+ def get_cluster_baseinfo(self):
+ return self.cluster_info.base_info
+
+ def get_cluster_flavor_name(self):
+ return self.cluster_info.flavor.get(const.FLAVOR_NAME)
+
+ def get_cluster_flavor_roles(self):
+ return self.cluster_info.flavor.get(const.ROLES, [])
+
+ def get_cluster_flavor_template(self):
+ return self.cluster_info.flavor.get(const.TMPL)
+
+ def get_cluster_package_config(self):
+ return self.cluster_info.package_config
+
+ def get_cluster_network_mapping(self):
+ mapping = self.cluster_info.network_mapping
+ logging.info("Network mapping in the config is '%s'!", mapping)
+ return mapping
+
+ def get_cluster_deployed_os_config(self):
+ return self.cluster_info.deployed_os_config
+
+ def get_cluster_deployed_package_config(self):
+ return self.cluster_info.deployed_package_config
+
+ def get_cluster_roles_mapping(self):
+ return self.cluster_info.roles_mapping
+
+ def get_cluster_patched_roles_mapping(self):
+ return self.cluster_info._get_cluster_patched_roles_mapping()
+
+ def validate_host(self, host_id):
+ if host_id not in self.hosts_info:
+ raise RuntimeError("host_id %s is invalid" % host_id)
+
+ def get_host_id_list(self):
+ return self.hosts_info.keys()
+
+ def get_hosts_id_list_for_os_installation(self):
+ """Get info of hosts which need to install/reinstall OS."""
+ return [
+ id for id, info in self.hosts_info.items()
+ if info.reinstall_os_flag
+ ]
+
+ def get_server_credentials(self):
+ cluster_os_config = self.get_cluster_os_config()
+ if not cluster_os_config:
+ logging.info("cluster os_config is None!")
+ return ()
+
+ username = cluster_os_config[const.SERVER_CREDS][const.USERNAME]
+ password = cluster_os_config[const.SERVER_CREDS][const.PASSWORD]
+ return (username, password)
+
+ def _get_host_info(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id]
+
+ def get_host_baseinfo(self, host_id):
+ self.validate_host(host_id)
+ host_info = self.hosts_info[host_id]
+ return host_info.baseinfo
+
+ def get_host_fullname(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].name
+
+ def get_host_dns(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].dns
+
+ def get_host_mac_address(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].mac
+
+ def get_hostname(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].hostname
+
+ def get_host_networks(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].networks
+
+ def get_host_interfaces(self, host_id):
+ # get interface names
+ return self.get_host_networks(host_id).keys()
+
+ def get_host_interface_ip(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].get_interface_ip(interface)
+
+ def get_host_interface_netmask(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].get_interface_netmask(interface)
+
+ def get_host_interface_subnet(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].get_interface_subnet(interface)
+
+ def is_interface_promiscuous(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].is_interface_promiscuous(interface)
+
+ def is_interface_mgmt(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].is_interface_mgmt(interface)
+
+ def get_host_os_config(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].os_config
+
+ def get_host_domain(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].domain
+
+ def get_host_network_mapping(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].network_mapping
+
+ def get_host_package_config(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].package_config
+
+ def get_host_deployed_os_config(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].deployed_os_config
+
+ def get_host_deployed_package_config(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].deployed_package_config
+
+ def get_host_roles(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].roles
+
+ def get_all_hosts_roles(self, hosts_id_list=None):
+ roles = []
+ for host_id, host_info in self.hosts_info.iteritems():
+ roles.extend(host_info.roles)
+
+ return list(set(roles))
+
+ def get_hosts_ip_settings(self, ip_settings, sys_intf_mappings):
+ logging.info(
+            "get_hosts_ip_settings: ip_settings=%s, sys_intf_mappings=%s",
+            ip_settings, sys_intf_mappings
+ )
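+        # Illustrative input shapes (hypothetical values, for orientation):
+        #   ip_settings = [{'name': 'mgmt', 'cidr': '172.16.1.0/24',
+        #                   'ip_ranges': [['172.16.1.3', '172.16.1.100']]}]
+        #   sys_intf_mappings = [{'name': 'mgmt', 'interface': 'eth0'}]
+        # Each host then gets one entry per ip_settings item, with 'ip'
+        # drawn from the shared range generator, 'netmask' stored as a
+        # prefix length (24 here), and 'alias' resolved to 'eth0'.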
+
+ intf_alias = {}
+ for m in sys_intf_mappings:
+ if "vlan_tag" in m:
+ intf_alias[m["name"]] = m["name"]
+ else:
+ intf_alias[m["name"]] = m["interface"]
+
+ mappings = {}
+ hosts_id_list = self.get_host_id_list()
+ for host_id in hosts_id_list:
+ hostname = self.get_hostname(host_id)
+ mappings[hostname] = []
+ for ip_info in ip_settings:
+ logging.info("ip_info=%s" % ip_info)
+ new_ip_info = deepcopy(ip_info)
+ del new_ip_info["ip_ranges"]
+
+ ip_ranges = ip_info["ip_ranges"]
+ new_ip_info["netmask"] = netaddr.IPNetwork(
+ ip_info["cidr"]
+ ).netmask.bin.count("1")
+ new_ip_info["ip"] = get_ip_addr(ip_ranges).next()
+ new_ip_info["alias"] = intf_alias[ip_info["name"]]
+ mappings[hostname].append(new_ip_info)
+
+ return {"ip_settings": mappings}
+
+ def get_host_roles_mapping(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].roles_mapping
+
+ def get_host_ipmi_info(self, host_id):
+ self.validate_host(host_id)
+ if self.hosts_info[host_id].ipmi:
+ return (
+ self.hosts_info[host_id].ipmi[const.IP_ADDR],
+ self.hosts_info[host_id].ipmi
+ [const.IPMI_CREDS][const.USERNAME],
+ self.hosts_info[host_id].ipmi
+                [const.IPMI_CREDS][const.PASSWORD])
+ else:
+ return (None, None, None)
diff --git a/compass-deck/deployment/installers/installer.py b/compass-deck/deployment/installers/installer.py
new file mode 100644
index 0000000..cfeb9e8
--- /dev/null
+++ b/compass-deck/deployment/installers/installer.py
@@ -0,0 +1,291 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
+
+
+"""Module to provider installer interface.
+"""
+from Cheetah.Template import Template
+from copy import deepcopy
+import imp
+import logging
+import os
+import simplejson as json
+
+from compass.deployment.installers.config_manager import BaseConfigManager
+from compass.utils import setting_wrapper as compass_setting
+from compass.utils import util
+
+
+CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
+
+
+class BaseInstaller(object):
+ """Interface for installer."""
+ NAME = 'installer'
+
+ def __repr__(self):
+ return '%r[%r]' % (self.__class__.__name__, self.NAME)
+
+ def deploy(self, **kwargs):
+ """virtual method to start installing process."""
+ raise NotImplementedError
+
+ def clean_progress(self, **kwargs):
+ raise NotImplementedError
+
+ def delete_hosts(self, **kwargs):
+ """Delete hosts from installer server."""
+ raise NotImplementedError
+
+ def redeploy(self, **kwargs):
+ raise NotImplementedError
+
+ def ready(self, **kwargs):
+ pass
+
+ def cluster_ready(self, **kwargs):
+ pass
+
+ def get_tmpl_vars_from_metadata(self, metadata, config):
+ """Get variables dictionary for rendering templates from metadata.
+
+ :param dict metadata: The metadata dictionary.
+        :param dict config: The config dictionary to map onto the metadata.
+ """
+ template_vars = {}
+ self._get_tmpl_vars_helper(metadata, config, template_vars)
+
+ return template_vars
+
+ def _get_key_mapping(self, metadata, key, is_regular_key):
+ """Get the keyword which the input key maps to.
+
+ This keyword will be added to dictionary used to render templates.
+
+ If the key in metadata has a mapping to another keyword which is
+        used for templates, then return this keyword. If the key starts
+        with '$', which marks a variable in metadata, return the key itself
+        as the mapping keyword. If the key has no mapping, return None.
+
+ :param dict metadata: metadata/submetadata dictionary.
+ :param str key: The keyword defined in metadata.
+ :param bool is_regular_key: False when the key defined in metadata
+            is a variable (starting with '$').
+ """
+ mapping_to = key
+ if is_regular_key:
+ try:
+ mapping_to = metadata['_self']['mapping_to']
+ except Exception:
+ mapping_to = None
+
+ return mapping_to
+
+ def _get_submeta_by_key(self, metadata, key):
+        """Get submetadata dictionary based on the current metadata key.
+
+        Also determines whether the input key is a regular string keyword
+        or a variable keyword defined in metadata, which starts with '$'.
+
+ :param dict metadata: The metadata dictionary.
+ :param str key: The keyword defined in the metadata.
+ """
+ if key in metadata:
+ return (True, metadata[key])
+
+ temp = deepcopy(metadata)
+ if '_self' in temp:
+ del temp['_self']
+ meta_key = temp.keys()[0]
+ if meta_key.startswith("$"):
+ return (False, metadata[meta_key])
+
+ raise KeyError("'%s' is invalid in metadata '%s'!" % (key, metadata))
+
+ def _get_tmpl_vars_helper(self, metadata, config, output):
+ for key, config_value in sorted(config.iteritems()):
+ is_regular_key, sub_meta = self._get_submeta_by_key(metadata, key)
+ mapping_to = self._get_key_mapping(sub_meta, key, is_regular_key)
+
+ if isinstance(config_value, dict):
+ if mapping_to:
+ new_output = output[mapping_to] = {}
+ else:
+ new_output = output
+
+ self._get_tmpl_vars_helper(sub_meta, config_value, new_output)
+
+ elif mapping_to:
+ output[mapping_to] = config_value
+
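+    # A small worked example of the metadata mapping above (hypothetical
+    # metadata, not from the original code): given
+    #   metadata = {'server_credentials': {
+    #       '_self': {'mapping_to': 'server_credentials'},
+    #       'username': {'_self': {'mapping_to': 'username'}},
+    #       'password': {'_self': {'mapping_to': 'password'}}}}
+    #   config = {'server_credentials': {'username': 'root',
+    #                                    'password': 'root'}}
+    # get_tmpl_vars_from_metadata(metadata, config) returns
+    #   {'server_credentials': {'username': 'root', 'password': 'root'}}
+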
+ def get_config_from_template(self, tmpl_path, vars_dict):
+ logging.debug("template path is %s", tmpl_path)
+ logging.debug("vars_dict is %s", vars_dict)
+
+ if not os.path.exists(tmpl_path) or not vars_dict:
+ logging.info("Template dir or vars_dict is None!")
+ return {}
+
+ searchList = []
+ copy_vars_dict = deepcopy(vars_dict)
+ for key, value in vars_dict.iteritems():
+ if isinstance(value, dict):
+ temp = copy_vars_dict[key]
+ del copy_vars_dict[key]
+ searchList.append(temp)
+ searchList.append(copy_vars_dict)
+
+ # Load base template first if it exists
+ base_config = {}
+ base_tmpl_path = os.path.join(os.path.dirname(tmpl_path), 'base.tmpl')
+ if os.path.isfile(base_tmpl_path) and base_tmpl_path != tmpl_path:
+ base_tmpl = Template(file=base_tmpl_path, searchList=searchList)
+ base_config = json.loads(base_tmpl.respond(), encoding='utf-8')
+ base_config = json.loads(json.dumps(base_config), encoding='utf-8')
+
+ # Load specific template for current adapter
+ tmpl = Template(file=open(tmpl_path, "r"), searchList=searchList)
+ config = json.loads(tmpl.respond(), encoding='utf-8')
+ config = json.loads(json.dumps(config), encoding='utf-8')
+
+ # Merge the two outputs
+ config = util.merge_dict(base_config, config)
+
+        logging.debug("get_config_from_template returns %s", config)
+ return config
+
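+    # Illustrative searchList flattening (hypothetical vars_dict): an input
+    # of {'os_config': {'language': 'EN'}, 'hostname': 'host1'} becomes
+    # searchList [{'language': 'EN'}, {'hostname': 'host1'}], so a Cheetah
+    # template can reference $language and $hostname directly.
+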
+ @classmethod
+ def get_installer(cls, name, path, adapter_info, cluster_info, hosts_info):
+ try:
+ mod_file, path, descr = imp.find_module(name, [path])
+ if mod_file:
+ mod = imp.load_module(name, mod_file, path, descr)
+ config_manager = BaseConfigManager(adapter_info, cluster_info,
+ hosts_info)
+ return getattr(mod, mod.NAME)(config_manager)
+
+ except ImportError as exc:
+ logging.error('No such module found: %s', name)
+ logging.exception(exc)
+
+ return None
+
+
+class OSInstaller(BaseInstaller):
+ """Interface for os installer."""
+ NAME = 'OSInstaller'
+ INSTALLER_BASE_DIR = os.path.join(CURRENT_DIR, 'os_installers')
+
+ def get_oses(self):
+ """virtual method to get supported oses.
+
+ :returns: list of str, each is the supported os version.
+ """
+ return []
+
+ @classmethod
+ def get_installer(cls, name, adapter_info, cluster_info, hosts_info):
+ if name is None:
+ logging.info("Installer name is None! No OS installer loaded!")
+ return None
+
+ path = os.path.join(cls.INSTALLER_BASE_DIR, name)
+ installer = super(OSInstaller, cls).get_installer(name, path,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ if not isinstance(installer, OSInstaller):
+ logging.info("Installer '%s' is not an OS installer!" % name)
+ return None
+
+ return installer
+
+ def poweron(self, host_id):
+ pass
+
+ def poweroff(self, host_id):
+ pass
+
+ def reset(self, host_id):
+ pass
+
+
+class PKInstaller(BaseInstaller):
+ """Interface for package installer."""
+ NAME = 'PKInstaller'
+ INSTALLER_BASE_DIR = os.path.join(CURRENT_DIR, 'pk_installers')
+
+ def generate_installer_config(self):
+ raise NotImplementedError(
+ 'generate_installer_config is not defined in %s',
+ self.__class__.__name__
+ )
+
+ def get_target_systems(self):
+ """virtual method to get available target_systems for each os.
+
+ :returns: dict of os_version to target systems as list of str.
+ """
+ return {}
+
+ def get_roles(self, target_system):
+ """virtual method to get all roles of given target system.
+
+ :param target_system: target distributed system such as OpenStack.
+ :type target_system: str
+
+ :returns: dict of role to role description as str.
+ """
+ return {}
+
+ def os_ready(self, **kwargs):
+ pass
+
+ def cluster_os_ready(self, **kwargs):
+ pass
+
+ def serialize_config(self, config, destination):
+ with open(destination, "w") as f:
+ f.write(config)
+
+ @classmethod
+ def get_installer(cls, name, adapter_info, cluster_info, hosts_info):
+ if name is None:
+            logging.info("Installer name is None. No package installer loaded!")
+ return None
+
+ path = os.path.join(cls.INSTALLER_BASE_DIR, name)
+ if not os.path.exists(path):
+            path = os.path.join(
+                compass_setting.PLUGINS_DIR, name, "implementation", name)
+ if not os.path.exists(path):
+ logging.info("Installer '%s' does not exist!" % name)
+ return None
+ installer = super(PKInstaller, cls).get_installer(name, path,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ if not isinstance(installer, PKInstaller):
+ logging.info("Installer '%s' is not a package installer!" % name)
+ return None
+
+ return installer
diff --git a/compass-deck/deployment/installers/os_installers/__init__.py b/compass-deck/deployment/installers/os_installers/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-deck/deployment/installers/os_installers/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/deployment/installers/os_installers/cobbler/__init__.py b/compass-deck/deployment/installers/os_installers/cobbler/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-deck/deployment/installers/os_installers/cobbler/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/deployment/installers/os_installers/cobbler/cobbler.py b/compass-deck/deployment/installers/os_installers/cobbler/cobbler.py
new file mode 100644
index 0000000..302c9be
--- /dev/null
+++ b/compass-deck/deployment/installers/os_installers/cobbler/cobbler.py
@@ -0,0 +1,449 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""os installer cobbler plugin.
+"""
+import logging
+import os
+import shutil
+import xmlrpclib
+
+from compass.deployment.installers.installer import OSInstaller
+from compass.deployment.utils import constants as const
+from compass.utils import setting_wrapper as compass_setting
+from compass.utils import util
+from copy import deepcopy
+
+
+NAME = 'CobblerInstaller'
+
+
+class CobblerInstaller(OSInstaller):
+ """cobbler installer"""
+ CREDENTIALS = "credentials"
+ USERNAME = 'username'
+ PASSWORD = 'password'
+
+ INSTALLER_URL = "cobbler_url"
+ TMPL_DIR = 'tmpl_dir'
+ SYS_TMPL = 'system.tmpl'
+ SYS_TMPL_NAME = 'system.tmpl'
+ SYS_PROFILE_NAME = 'profile.tmpl'
+ PROFILE = 'profile'
+
+ POWER_TYPE = 'power_type'
+ POWER_ADDR = 'power_address'
+ POWER_USER = 'power_user'
+ POWER_PASS = 'power_pass'
+
+ def __init__(self, config_manager):
+ super(CobblerInstaller, self).__init__()
+
+ self.config_manager = config_manager
+ installer_settings = self.config_manager.get_os_installer_settings()
+ try:
+ username = installer_settings[self.CREDENTIALS][self.USERNAME]
+ password = installer_settings[self.CREDENTIALS][self.PASSWORD]
+ cobbler_url = installer_settings[self.INSTALLER_URL]
+ self.tmpl_dir = CobblerInstaller.get_tmpl_path()
+
+ except KeyError as ex:
+ raise KeyError(ex.message)
+
+ # The connection is created when cobbler installer is initialized.
+ self.remote = self._get_cobbler_server(cobbler_url)
+ self.token = self._get_token(username, password)
+ self.pk_installer_config = None
+
+ logging.debug('%s instance created', 'CobblerInstaller')
+
+ @classmethod
+ def get_tmpl_path(cls):
+ return os.path.join(compass_setting.TMPL_DIR, 'cobbler')
+
+ def __repr__(self):
+        return '%r[remote=%r,token=%r]' % (
+ self.__class__.__name__, self.remote, self.token)
+
+ def _get_cobbler_server(self, cobbler_url):
+ if not cobbler_url:
+ logging.error("Cobbler URL is None!")
+ raise Exception("Cobbler URL cannot be None!")
+
+ return xmlrpclib.Server(cobbler_url)
+
+ def _get_token(self, username, password):
+ if self.remote is None:
+ raise Exception("Cobbler remote instance is None!")
+ return self.remote.login(username, password)
+
+ def get_supported_oses(self):
+ """get supported os versions.
+
+ note::
+ In cobbler, we treat profile name as the indicator
+ of os version. It is just a simple indicator
+ and not accurate.
+ """
+ profiles = self.remote.get_profiles()
+ oses = []
+ for profile in profiles:
+ oses.append(profile['name'])
+ return oses
+
+ def deploy(self):
+        """Sync cobbler to catch up the latest config and start OS install.
+
+        Return both cluster and hosts deploy configs. The return
+        format:
+ {
+ "cluster": {
+ "id": 1,
+ "deployed_os_config": {},
+ },
+ "hosts": {
+ 1($clusterhost_id): {
+ "deployed_os_config": {...},
+ },
+ ....
+ }
+ }
+ """
+ host_ids = self.config_manager.get_hosts_id_list_for_os_installation()
+ if not host_ids:
+ # No hosts need to install OS
+ logging.info("Cobbler: No host needs to install OS.")
+ return {}
+
+ os_version = self.config_manager.get_os_version()
+ profile = self._get_profile_from_server(os_version)
+
+ global_vars_dict = self._get_cluster_tmpl_vars_dict()
+
+ self.update_profile_config_to_cobbler(profile, global_vars_dict)
+
+ hosts_deploy_config = {}
+
+ for host_id in host_ids:
+ hostname = self.config_manager.get_hostname(host_id)
+ vars_dict = self._get_host_tmpl_vars_dict(host_id,
+ global_vars_dict,
+ hostname=hostname,
+ profile=profile)
+
+ self.update_host_config_to_cobbler(host_id, hostname, vars_dict)
+
+ # set host deploy config
+ host_config = {}
+ host_config[const.DEPLOYED_OS_CONFIG] = vars_dict[const.OS_CONFIG]
+ hosts_deploy_config[host_id] = host_config
+
+        # sync to cobbler and trigger installation.
+ self._sync()
+
+ cluster_config = global_vars_dict.setdefault(const.OS_CONFIG, {})
+
+ return {
+ const.CLUSTER: {
+ const.ID: self.config_manager.get_cluster_id(),
+ const.DEPLOYED_OS_CONFIG: cluster_config
+ },
+ const.HOSTS: hosts_deploy_config
+ }
+
+ def clean_progress(self):
+        """Clean log files and configs for the hosts to deploy."""
+ clusterhost_list = self.config_manager.get_host_id_list()
+ log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[NAME]
+
+ for host_id in clusterhost_list:
+ hostname = self.config_manager.get_hostname(host_id)
+ self._clean_log(log_dir_prefix, hostname)
+
+ def redeploy(self):
+ """redeploy hosts."""
+ host_ids = self.config_manager.get_host_id_list()
+ if not host_ids:
+ logging.info("Cobbler: hostlist is None, no host is redeployed")
+ return
+ for host_id in host_ids:
+ hostname = self.config_manager.get_hostname(host_id)
+ sys_id = self._get_create_system(hostname)
+ if sys_id:
+ # enable netboot for this host
+ self._netboot_enabled(sys_id)
+
+ self._sync()
+
+ def set_package_installer_config(self, package_configs):
+        """Set package installer configs for use right after OS install.
+
+        Cobbler can install and configure the package installer right after
+        OS installation completes, using the package_config info provided
+        by the package installer.
+
+ :param dict package_configs: The dict of config generated by package
+ installer for each clusterhost. The IDs
+ of clusterhosts are the keys of
+ package_configs.
+ """
+ self.pk_installer_config = package_configs
+
+ def _sync(self):
+ """Sync the updated config to cobbler and trigger installation."""
+ try:
+ self.remote.sync(self.token)
+ os.system('sudo service rsyslog restart')
+ except Exception as ex:
+            logging.error("Failed to sync cobbler server! Error: %s", ex)
+ raise ex
+
+ def dump_system_info(self, host_id):
+
+ hostname = self.config_manager.get_hostname(host_id)
+ if self.remote is None or not hostname:
+ logging.info("[dump_system_info]Remote or hostname is None.")
+ return {}
+
+ return self.remote.get_system_as_rendered(hostname)
+
+ def _generate_system_config(self, host_id, host_vars_dict):
+ """Generate updated system config from the template.
+
+ :param host_vars_dict: dict of variables for the system template to
+ generate system config dict for each host.
+ """
+ os_version = self.config_manager.get_os_version()
+
+ tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, os_version), self.SYS_TMPL_NAME
+ )
+ if not os.path.exists(tmpl_path):
+            err_msg = "Template '%s' does not exist!" % tmpl_path
+ logging.error(err_msg)
+ raise Exception(err_msg)
+
+ system_config = self.get_config_from_template(tmpl_path,
+ host_vars_dict)
+
+ # update package config info to cobbler ksmeta
+ if self.pk_installer_config and host_id in self.pk_installer_config:
+ pk_config = self.pk_installer_config[host_id]
+ ksmeta = system_config.setdefault("ksmeta", {})
+ util.merge_dict(ksmeta, pk_config)
+ system_config["ksmeta"] = ksmeta
+
+ return system_config
+
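+    # Illustrative (hypothetical values): with pk_installer_config set to
+    # {1: {'proxy': 'http://1.2.3.4:3128'}}, the rendered system config for
+    # host 1 gets that dict merged into its 'ksmeta' section before upload.
+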
+ def _generate_profile_config(self, cluster_vars_dict):
+ os_version = self.config_manager.get_os_version()
+ tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, os_version), self.SYS_PROFILE_NAME
+ )
+
+ return self.get_config_from_template(tmpl_path, cluster_vars_dict)
+
+ def _get_profile_from_server(self, os_version):
+ """Get profile from cobbler server."""
+ result = self.remote.find_profile({'name': os_version})
+ if not result:
+            raise Exception("Cannot find profile for '%s'!" % os_version)
+
+ profile = result[0]
+ return profile
+
+ def _get_create_system(self, hostname):
+ """get system reference id for the host."""
+ sys_name = hostname
+ sys_id = None
+ system_info = self.remote.find_system({"name": hostname})
+
+ if not system_info:
+ # Create a new system
+ sys_id = self.remote.new_system(self.token)
+ self.remote.modify_system(sys_id, "name", hostname, self.token)
+ logging.debug('create new system %s for %s', sys_id, sys_name)
+ else:
+ sys_id = self.remote.get_system_handle(sys_name, self.token)
+
+ return sys_id
+
+ def _get_profile_id(self, profilename):
+ """get profile reference id for the cluster."""
+ return self.remote.get_profile_handle(profilename, self.token)
+
+ def _clean_system(self, hostname):
+ """clean system."""
+ sys_name = hostname
+ try:
+ self.remote.remove_system(sys_name, self.token)
+ logging.debug('system %s is removed', sys_name)
+ except Exception:
+ logging.debug('no system %s found to remove', sys_name)
+
+ def _update_system_config(self, sys_id, system_config):
+ """update modify system."""
+ for key, value in system_config.iteritems():
+ self.remote.modify_system(sys_id, str(key), value, self.token)
+
+ self.remote.save_system(sys_id, self.token)
+
+ def _update_profile_config(self, profile_id, profile_config):
+ for key, value in profile_config.iteritems():
+ self.remote.modify_profile(profile_id, str(key), value, self.token)
+
+ self.remote.save_profile(profile_id, self.token)
+
+ def _netboot_enabled(self, sys_id):
+ """enable netboot."""
+ self.remote.modify_system(sys_id, 'netboot_enabled', True, self.token)
+ self.remote.save_system(sys_id, self.token)
+
+ def _clean_log(self, log_dir_prefix, system_name):
+ """clean log."""
+ log_dir = os.path.join(log_dir_prefix, system_name)
+ shutil.rmtree(log_dir, True)
+
+ def update_host_config_to_cobbler(self, host_id, hostname, host_vars_dict):
+ """update host config and upload to cobbler server."""
+ sys_id = self._get_create_system(hostname)
+
+ system_config = self._generate_system_config(host_id, host_vars_dict)
+ logging.debug('%s system config to update: %s', host_id, system_config)
+
+ self._update_system_config(sys_id, system_config)
+ self._netboot_enabled(sys_id)
+
+ def update_profile_config_to_cobbler(self, profilename, cluster_vars_dict):
+ """update profile config and upload to cobbler server."""
+
+ profile_id = self._get_profile_id(profilename)
+
+ profile_config = self._generate_profile_config(cluster_vars_dict)
+ logging.debug(
+ '%s profile config to update: %s', profilename, profile_config
+ )
+
+ self._update_profile_config(profile_id, profile_config)
+
+ def delete_hosts(self):
+ hosts_id_list = self.config_manager.get_host_id_list()
+ logging.debug('delete hosts %s', hosts_id_list)
+ for host_id in hosts_id_list:
+ self.delete_single_host(host_id)
+ self._sync()
+
+ def delete_single_host(self, host_id):
+        """Delete the host from the cobbler server.
+
+        Also clean up the host's installation progress.
+ """
+ hostname = self.config_manager.get_hostname(host_id)
+ try:
+ log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[NAME]
+ self._clean_system(hostname)
+ self._clean_log(log_dir_prefix, hostname)
+ except Exception as ex:
+ logging.error("Deleting host got exception: %s", ex)
+ logging.exception(ex)
+
+ def _get_host_tmpl_vars_dict(self, host_id, global_vars_dict, **kwargs):
+ """Generate template variables dictionary."""
+ vars_dict = {}
+ if global_vars_dict:
+ # Set cluster template vars_dict from cluster os_config.
+ vars_dict = deepcopy(global_vars_dict)
+
+        # Set hostname, MAC address, networks, DNS and so on.
+ host_baseinfo = self.config_manager.get_host_baseinfo(host_id)
+ vars_dict[const.BASEINFO] = host_baseinfo
+
+ # Set profile
+ if self.PROFILE in kwargs:
+ profile = kwargs[self.PROFILE]
+ else:
+ os_version = self.config_manager.get_os_version()
+ profile = self._get_profile_from_server(os_version)
+
+ vars_dict[const.BASEINFO][self.PROFILE] = profile
+
+ metadata = self.config_manager.get_os_config_metadata()
+ os_config = self.config_manager.get_host_os_config(host_id)
+
+ # Get template variables values from host os_config
+ host_vars_dict = self.get_tmpl_vars_from_metadata(metadata, os_config)
+ util.merge_dict(
+ vars_dict.setdefault(const.OS_CONFIG, {}), host_vars_dict
+ )
+ return vars_dict
+
+ def _get_cluster_tmpl_vars_dict(self):
+ metadata = self.config_manager.get_os_config_metadata()
+ os_config = self.config_manager.get_cluster_os_config()
+
+        cluster_vars_dict = {}
+        cluster_vars_dict[const.OS_CONFIG] = \
+            self.get_tmpl_vars_from_metadata(metadata, os_config)
+
+        return cluster_vars_dict
+
+    def _check_and_set_system_ipmi(self, host_id, sys_id):
+ if not sys_id:
+ logging.info("System is None!")
+ return False
+
+ system = self.dump_system_info(host_id)
+ if system[self.POWER_TYPE] != 'ipmilan' or not system[self.POWER_USER]:
+            # Set system power type to ipmilan if needed and set IPMI info
+            ipmi_info = self.config_manager.get_host_ipmi_info(host_id)
+            if not all(ipmi_info):
+                logging.info('No IPMI information found! Cannot set power.')
+                return False
+
+            ipmi_ip, ipmi_user, ipmi_pass = ipmi_info
+ power_opts = {}
+ power_opts[self.POWER_TYPE] = 'ipmilan'
+ power_opts[self.POWER_ADDR] = ipmi_ip
+ power_opts[self.POWER_USER] = ipmi_user
+ power_opts[self.POWER_PASS] = ipmi_pass
+
+ self._update_system_config(sys_id, power_opts)
+
+ return True
+
+ def poweron(self, host_id):
+ hostname = self.config_manager.get_hostname(host_id)
+ sys_id = self._get_create_system(hostname)
+        if not self._check_and_set_system_ipmi(host_id, sys_id):
+ return
+
+ self.remote.power_system(sys_id, self.token, power='on')
+ logging.info("Host with ID=%d starts to power on!" % host_id)
+
+ def poweroff(self, host_id):
+ hostname = self.config_manager.get_hostname(host_id)
+ sys_id = self._get_create_system(hostname)
+        if not self._check_and_set_system_ipmi(host_id, sys_id):
+ return
+
+ self.remote.power_system(sys_id, self.token, power='off')
+ logging.info("Host with ID=%d starts to power off!" % host_id)
+
+ def reset(self, host_id):
+ hostname = self.config_manager.get_hostname(host_id)
+ sys_id = self._get_create_system(hostname)
+        if not self._check_and_set_system_ipmi(host_id, sys_id):
+ return
+
+ self.remote.power_system(sys_id, self.token, power='reboot')
+ logging.info("Host with ID=%d starts to reboot!" % host_id)
diff --git a/compass-deck/deployment/installers/pk_installers/__init__.py b/compass-deck/deployment/installers/pk_installers/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-deck/deployment/installers/pk_installers/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/deployment/installers/pk_installers/ansible_installer/__init__.py b/compass-deck/deployment/installers/pk_installers/ansible_installer/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/deployment/installers/pk_installers/ansible_installer/__init__.py
diff --git a/compass-deck/deployment/installers/pk_installers/ansible_installer/ansible_installer.py b/compass-deck/deployment/installers/pk_installers/ansible_installer/ansible_installer.py
new file mode 100644
index 0000000..345f4e0
--- /dev/null
+++ b/compass-deck/deployment/installers/pk_installers/ansible_installer/ansible_installer.py
@@ -0,0 +1,401 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Compass Dev Team (dev-team@syscompass.org)"
+
+"""package installer: ansible plugin."""
+
+from Cheetah.Template import Template
+from copy import deepcopy
+import json
+import logging
+import os
+import re
+import shutil
+import subprocess
+
+from compass.deployment.installers.installer import PKInstaller
+from compass.deployment.utils import constants as const
+from compass.utils import setting_wrapper as compass_setting
+from compass.utils import util
+
+NAME = "AnsibleInstaller"
+
+
+def byteify(input):
+ if isinstance(input, dict):
+ return dict([(byteify(key), byteify(value))
+ for key, value in input.iteritems()])
+ elif isinstance(input, list):
+ return [byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
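+# Illustrative: byteify({u'roles': [u'controller']}) returns
+# {'roles': ['controller']} with all unicode recursively utf-8 encoded
+# (Python 2 semantics).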
+
+class AnsibleInstaller(PKInstaller):
+ INVENTORY_TMPL_DIR = 'inventories'
+ GROUPVARS_TMPL_DIR = 'vars'
+    INVENTORY_PATCH_TEMPLATE_DIR = 'inventories'
+
+ # keywords in package installer settings
+ ANSIBLE_DIR = 'ansible_dir'
+ ANSIBLE_RUN_DIR = 'ansible_run_dir'
+ LOG_FILE = 'ansible_log_file'
+ ANSIBLE_CONFIG = 'ansible_config'
+ INVENTORY = 'inventory_file'
+ GROUP_VARIABLE = 'group_variable'
+ HOSTS_PATH = 'etc_hosts_path'
+ RUNNER_DIRS = 'runner_dirs'
+
+ def __init__(self, config_manager):
+ super(AnsibleInstaller, self).__init__()
+
+ self.config_manager = config_manager
+ self.tmpl_name = self.config_manager.get_cluster_flavor_template()
+ self.installer_settings = (
+ self.config_manager.get_pk_installer_settings()
+ )
+ settings = self.installer_settings
+ self.ansible_dir = settings.setdefault(self.ANSIBLE_DIR, None)
+ self.ansible_run_dir = (
+ settings.setdefault(self.ANSIBLE_RUN_DIR, None)
+ )
+ self.log_file = settings.setdefault(self.LOG_FILE, None)
+ self.ansible_config = (
+ settings.setdefault(self.ANSIBLE_CONFIG, None)
+ )
+ self.inventory = settings.setdefault(self.INVENTORY, None)
+ self.group_variable = (
+ settings.setdefault(self.GROUP_VARIABLE, None)
+ )
+ self.hosts_path = (
+ settings.setdefault(self.HOSTS_PATH, None)
+ )
+ self.runner_dirs = (
+ settings.setdefault(self.RUNNER_DIRS, None)
+ )
+ self.playbook = self.tmpl_name.replace('tmpl', 'yml')
+ self.runner_files = [self.playbook]
+
+ adapter_name = self.config_manager.get_dist_system_name()
+ self.tmpl_dir = AnsibleInstaller.get_tmpl_path(adapter_name)
+ self.adapter_dir = os.path.join(self.ansible_dir, adapter_name)
+ logging.debug('%s instance created', self)
+
+ @classmethod
+ def get_tmpl_path(cls, adapter_name):
+ tmpl_path = os.path.join(
+ os.path.join(compass_setting.TMPL_DIR, 'ansible_installer'),
+ adapter_name
+ )
+ return tmpl_path
+
+ def __repr__(self):
+        return '%s[name=%s]' % (self.__class__.__name__, self.NAME)
+
+ def generate_installer_config(self):
+        """Generate per-host installer config consumed during OS install.
+
+ The output format:
+ {
+ '1'($host_id/clusterhost_id):{
+ 'tool': 'ansible',
+ },
+ .....
+ }
+ """
+ host_ids = self.config_manager.get_host_id_list()
+ os_installer_configs = {}
+ for host_id in host_ids:
+ temp = {
+ "tool": "ansible",
+ }
+ os_installer_configs[host_id] = temp
+
+ return os_installer_configs
+
+ def get_env_name(self, dist_sys_name, cluster_name):
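+        # e.g. ('ansible_openstack', 'cluster1') -> 'ansible_openstack-cluster1'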
+ return "-".join((dist_sys_name, cluster_name))
+
+ def _get_cluster_tmpl_vars(self):
+        """Generate template variables dict based on cluster level config.
+
+ The vars_dict will be:
+ {
+ "baseinfo": {
+ "id":1,
+ "name": "cluster01",
+ ...
+ },
+ "package_config": {
+ .... //mapped from original package config based on metadata
+ },
+ "role_mapping": {
+ ....
+ }
+ }
+ """
+ cluster_vars_dict = {}
+ # set cluster basic information to vars_dict
+ cluster_baseinfo = self.config_manager.get_cluster_baseinfo()
+ cluster_vars_dict[const.BASEINFO] = cluster_baseinfo
+
+ # get and set template variables from cluster package config.
+        pk_metadata = self.config_manager.get_pk_config_metadata()
+ pk_config = self.config_manager.get_cluster_package_config()
+
+ # get os config as ansible needs them
+ os_metadata = self.config_manager.get_os_config_metadata()
+ os_config = self.config_manager.get_cluster_os_config()
+
+ pk_meta_dict = self.get_tmpl_vars_from_metadata(pk_metadata, pk_config)
+ os_meta_dict = self.get_tmpl_vars_from_metadata(os_metadata, os_config)
+ util.merge_dict(pk_meta_dict, os_meta_dict)
+
+ cluster_vars_dict[const.PK_CONFIG] = pk_meta_dict
+
+ # get and set roles_mapping to vars_dict
+ mapping = self.config_manager.get_cluster_roles_mapping()
+ logging.info("cluster role mapping is %s", mapping)
+ cluster_vars_dict[const.ROLES_MAPPING] = mapping
+
+ # get ip settings to vars_dict
+ hosts_ip_settings = self.config_manager.get_hosts_ip_settings(
+ pk_meta_dict["network_cfg"]["ip_settings"],
+ pk_meta_dict["network_cfg"]["sys_intf_mappings"]
+ )
+ logging.info("hosts_ip_settings is %s", hosts_ip_settings)
+ cluster_vars_dict["ip_settings"] = hosts_ip_settings
+
+ return byteify(cluster_vars_dict)
+
+ def _generate_inventory_attributes(self, global_vars_dict):
+ inventory_tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, self.INVENTORY_TMPL_DIR),
+ self.tmpl_name
+ )
+ if not os.path.exists(inventory_tmpl_path):
+ logging.error(
+ "Inventory template '%s' does not exist", self.tmpl_name
+ )
+ raise Exception("Template '%s' does not exist!" % self.tmpl_name)
+
+ return self.get_config_from_template(
+ inventory_tmpl_path, global_vars_dict
+ )
+
+ def _generate_group_vars_attributes(self, global_vars_dict):
+ logging.info("global vars dict is %s", global_vars_dict)
+ group_vars_tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, self.GROUPVARS_TMPL_DIR),
+ self.tmpl_name
+ )
+ if not os.path.exists(group_vars_tmpl_path):
+ logging.error("Vars template '%s' does not exist",
+ self.tmpl_name)
+ raise Exception("Template '%s' does not exist!" % self.tmpl_name)
+
+ return self.get_config_from_template(
+ group_vars_tmpl_path, global_vars_dict
+ )
+
+ def _generate_hosts_attributes(self, global_vars_dict):
+ hosts_tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, 'hosts'), self.tmpl_name
+ )
+ if not os.path.exists(hosts_tmpl_path):
+ logging.error("Hosts template '%s' does not exist", self.tmpl_name)
+ raise Exception("Template '%s' does not exist!" % self.tmpl_name)
+
+ return self.get_config_from_template(hosts_tmpl_path, global_vars_dict)
+
+ def _generate_ansible_cfg_attributes(self, global_vars_dict):
+ ansible_cfg_tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, 'ansible_cfg'), self.tmpl_name
+ )
+ if not os.path.exists(ansible_cfg_tmpl_path):
+ logging.error("cfg template '%s' does not exist", self.tmpl_name)
+ raise Exception("Template '%s' does not exist!" % self.tmpl_name)
+
+ return self.get_config_from_template(
+ ansible_cfg_tmpl_path,
+ global_vars_dict
+ )
+
+ def get_config_from_template(self, tmpl_path, vars_dict):
+ logging.debug("vars_dict is %s", vars_dict)
+
+ if not os.path.exists(tmpl_path) or not vars_dict:
+ logging.info("Template dir or vars_dict is None!")
+ return {}
+
+ searchList = []
+ copy_vars_dict = deepcopy(vars_dict)
+ for key, value in vars_dict.iteritems():
+ if isinstance(value, dict):
+ temp = copy_vars_dict[key]
+ del copy_vars_dict[key]
+ searchList.append(temp)
+ searchList.append(copy_vars_dict)
+
+ # Load the specific template for the current adapter; the file
+ # handle is closed once Cheetah has parsed the template.
+ with open(tmpl_path, "r") as tmpl_file:
+     tmpl = Template(file=tmpl_file, searchList=searchList)
+ return tmpl.respond()
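+
+ # Illustrative sketch (hypothetical input): given
+ #     vars_dict = {'baseinfo': {'id': 1, 'name': 'c1'}, 'run_dir': '/tmp'}
+ # the loop above produces
+ #     searchList = [{'id': 1, 'name': 'c1'}, {'run_dir': '/tmp'}]
+ # so Cheetah can resolve nested keys ($id, $name) as well as remaining
+ # top-level scalars ($run_dir) without extra qualification.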
+
+ def _create_ansible_run_env(self, env_name, ansible_run_destination):
+ if os.path.exists(ansible_run_destination):
+ shutil.rmtree(ansible_run_destination, True)
+
+ os.mkdir(ansible_run_destination)
+
+ # copy roles to run env
+ dirs = self.runner_dirs
+ files = self.runner_files
+ for dir in dirs:
+ if not os.path.exists(os.path.join(self.ansible_dir, dir)):
+ continue
+ os.system(
+ "cp -rf %s %s" % (
+ os.path.join(self.ansible_dir, dir),
+ ansible_run_destination
+ )
+ )
+ for file in files:
+ logging.info('file is %s', file)
+ shutil.copy(
+ os.path.join(self.adapter_dir, file),
+ os.path.join(
+ ansible_run_destination,
+ file
+ )
+ )
+
+ def prepare_ansible(self, env_name, global_vars_dict):
+ ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
+ if os.path.exists(ansible_run_destination):
+ ansible_run_destination += "-expansion"
+ self._create_ansible_run_env(env_name, ansible_run_destination)
+ inv_config = self._generate_inventory_attributes(global_vars_dict)
+ inventory_dir = os.path.join(ansible_run_destination, 'inventories')
+
+ vars_config = self._generate_group_vars_attributes(global_vars_dict)
+ vars_dir = os.path.join(ansible_run_destination, 'group_vars')
+
+ hosts_config = self._generate_hosts_attributes(global_vars_dict)
+ hosts_destination = os.path.join(
+ ansible_run_destination, self.hosts_path
+ )
+
+ cfg_config = self._generate_ansible_cfg_attributes(global_vars_dict)
+ cfg_destination = os.path.join(
+ ansible_run_destination,
+ self.ansible_config
+ )
+
+ os.mkdir(inventory_dir)
+ os.mkdir(vars_dir)
+
+ inventory_destination = os.path.join(inventory_dir, self.inventory)
+ group_vars_destination = os.path.join(vars_dir, self.group_variable)
+ self.serialize_config(inv_config, inventory_destination)
+ self.serialize_config(vars_config, group_vars_destination)
+ self.serialize_config(hosts_config, hosts_destination)
+ self.serialize_config(cfg_config, cfg_destination)
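+
+ # Resulting run-env layout (illustrative; actual names come from the
+ # installer settings):
+ #     <ansible_run_dir>/<env_name>/
+ #         <ansible_config>             rendered ansible cfg attributes
+ #         <hosts_path>                 rendered hosts file
+ #         inventories/<inventory>      rendered inventory
+ #         group_vars/<group_variable>  rendered group vars
+ #         roles/ + <playbook>          copied by _create_ansible_run_env()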
+
+ def deploy(self):
+ """Start to deploy a distributed system.
+
+ Return both cluster and hosts deployed configs.
+ The return format:
+ {
+ "cluster": {
+ "id": 1,
+ "deployed_package_config": {
+ "roles_mapping": {...},
+ "service_credentials": {...},
+ ....
+ }
+ },
+ "hosts": {
+ 1($clusterhost_id): {
+ "deployed_package_config": {...}
+ },
+ ....
+ }
+ }
+ """
+ host_list = self.config_manager.get_host_id_list()
+ if not host_list:
+ return {}
+
+ adapter_name = self.config_manager.get_adapter_name()
+ cluster_name = self.config_manager.get_clustername()
+ env_name = self.get_env_name(adapter_name, cluster_name)
+
+ global_vars_dict = self._get_cluster_tmpl_vars()
+ logging.info(
+ '%s var dict: %s', self.__class__.__name__, global_vars_dict
+ )
+ # Create ansible related files
+ self.prepare_ansible(env_name, global_vars_dict)
+
+ def patch(self, patched_role_mapping):
+ adapter_name = self.config_manager.get_adapter_name()
+ cluster_name = self.config_manager.get_clustername()
+ env_name = self.get_env_name(adapter_name, cluster_name)
+ ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
+ inventory_dir = os.path.join(ansible_run_destination, 'inventories')
+ patched_global_vars_dict = self._get_cluster_tmpl_vars()
+ mapping = self.config_manager.get_cluster_patched_roles_mapping()
+ patched_global_vars_dict['roles_mapping'] = mapping
+ patched_inv = self._generate_inventory_attributes(
+ patched_global_vars_dict)
+ inv_file = os.path.join(inventory_dir, 'patched_inventory.yml')
+ self.serialize_config(patched_inv, inv_file)
+ config_file = os.path.join(
+ ansible_run_destination, self.ansible_config
+ )
+ playbook_file = os.path.join(ansible_run_destination, self.playbook)
+ log_file = os.path.join(ansible_run_destination, 'patch.log')
+ cmd = "ANSIBLE_CONFIG=%s ansible-playbook -i %s %s" % (config_file,
+ inv_file,
+ playbook_file)
+ with open(log_file, 'w') as logfile:
+ subprocess.Popen(cmd, shell=True, stdout=logfile, stderr=logfile)
+ return patched_role_mapping
+
+ def cluster_os_ready(self):
+ adapter_name = self.config_manager.get_adapter_name()
+ cluster_name = self.config_manager.get_clustername()
+ env_name = self.get_env_name(adapter_name, cluster_name)
+ ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
+ expansion_dir = ansible_run_destination + "-expansion"
+ if os.path.exists(expansion_dir):
+ ansible_run_destination = expansion_dir
+ inventory_dir = os.path.join(ansible_run_destination, 'inventories')
+ inventory_file = os.path.join(inventory_dir, self.inventory)
+ playbook_file = os.path.join(ansible_run_destination, self.playbook)
+ log_file = os.path.join(ansible_run_destination, 'run.log')
+ config_file = os.path.join(
+ ansible_run_destination, self.ansible_config
+ )
+ cmd = "ANSIBLE_CONFIG=%s ansible-playbook -i %s %s" % (config_file,
+ inventory_file,
+ playbook_file)
+ with open(log_file, 'w') as logfile:
+ subprocess.Popen(cmd, shell=True, stdout=logfile, stderr=logfile)
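+
+ # The spawned command is (illustrative):
+ #     ANSIBLE_CONFIG=<run_dir>/ansible.cfg ansible-playbook \
+ #         -i <run_dir>/inventories/<inventory> <run_dir>/<playbook>
+ # Popen returns immediately, so the playbook run proceeds
+ # asynchronously with its output captured in run.log.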
diff --git a/compass-deck/deployment/utils/__init__.py b/compass-deck/deployment/utils/__init__.py
new file mode 100644
index 0000000..cbd36e0
--- /dev/null
+++ b/compass-deck/deployment/utils/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
diff --git a/compass-deck/deployment/utils/constants.py b/compass-deck/deployment/utils/constants.py
new file mode 100644
index 0000000..e90b1b2
--- /dev/null
+++ b/compass-deck/deployment/utils/constants.py
@@ -0,0 +1,84 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
+
+
+"""All keywords variables in deployment are defined in this module."""
+
+
+# General keywords
+BASEINFO = 'baseinfo'
+CLUSTER = 'cluster'
+HOST = 'host'
+HOSTS = 'hosts'
+ID = 'id'
+NAME = 'name'
+PASSWORD = 'password'
+USERNAME = 'username'
+
+
+# Adapter info related keywords
+FLAVOR = 'flavor'
+FLAVORS = 'flavors'
+PLAYBOOK = 'playbook'
+FLAVOR_NAME = 'flavor_name'
+HEALTH_CHECK_CMD = 'health_check_cmd'
+TMPL = 'template'
+INSTALLER_SETTINGS = 'settings'
+METADATA = 'metadata'
+OS_INSTALLER = 'os_installer'
+PK_INSTALLER = 'package_installer'
+SUPPORT_OSES = 'supported_oses'
+
+
+# Cluster info related keywords
+ADAPTER_ID = 'adapter_id'
+OS_VERSION = 'os_name'
+
+
+# Host info related keywords
+DNS = 'dns'
+DOMAIN = 'domain'
+HOST_ID = 'host_id'
+HOSTNAME = 'hostname'
+IP_ADDR = 'ip'
+IPMI = 'ipmi'
+IPMI_CREDS = 'ipmi_credentials'
+MAC_ADDR = 'mac'
+MGMT_NIC_FLAG = 'is_mgmt'
+NETMASK = 'netmask'
+NETWORKS = 'networks'
+NIC = 'interface'
+CLUSTER_ID = 'cluster_id'
+ORIGIN_CLUSTER_ID = 'origin_cluster_id'
+PROMISCUOUS_FLAG = 'is_promiscuous'
+REINSTALL_OS_FLAG = 'reinstall_os'
+SUBNET = 'subnet'
+
+
+# Cluster/host config related keywords
+COMPLETED_PK_CONFIG = 'completed_package_config'
+COMPLETED_OS_CONFIG = 'completed_os_config'
+DEPLOYED_OS_CONFIG = 'deployed_os_config'
+DEPLOYED_PK_CONFIG = 'deployed_package_config'
+NETWORK_MAPPING = 'network_mapping'
+OS_CONFIG = 'os_config'
+OS_CONFIG_GENERAL = 'general'
+PK_CONFIG = 'package_config'
+ROLES = 'roles'
+PATCHED_ROLES = 'patched_roles'
+ROLES_MAPPING = 'roles_mapping'
+SERVER_CREDS = 'server_credentials'
+TMPL_VARS_DICT = 'vars_dict'
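+
+
+# Typical usage (illustrative): these constants index the config dicts
+# built by the deployment installers, e.g.
+#     cluster_vars_dict[BASEINFO] = {...}
+#     cluster_vars_dict[PK_CONFIG] = {...}
+#     cluster_vars_dict[ROLES_MAPPING] = {...}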
diff --git a/compass-deck/misc/Dockerfile b/compass-deck/misc/Dockerfile
new file mode 100644
index 0000000..116bf9f
--- /dev/null
+++ b/compass-deck/misc/Dockerfile
@@ -0,0 +1,86 @@
+FROM centos:latest
+
+# Add repos
+RUN rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
+ sed -i 's/^mirrorlist=https/mirrorlist=http/g' /etc/yum.repos.d/epel.repo && \
+ rpm -Uvh http://rpms.famillecollet.com/enterprise/remi-release-7.rpm
+
+# yum update
+RUN yum update -y
+
+# update repo
+ADD misc/compass_install.repo /etc/yum.repos.d/compass_install.repo
+
+# Install packages
+RUN yum --enablerepo=compass_install --nogpgcheck install -y python python-devel git wget syslinux amqp mod_wsgi httpd bind rsync yum-utils gcc unzip openssl openssl098e ca-certificates mysql-devel mysql MySQL-python python-virtualenv python-setuptools python-pip bc libselinux-python libffi-devel openssl-devel vim net-tools
+
+# Add code
+RUN mkdir -p /root/compass-deck
+ADD . /root/compass-deck
+RUN cd /root/ && \
+ git clone git://git.openstack.org/openstack/compass-web
+
+RUN mkdir -p /root/compass-deck/compass && \
+ mv /root/compass-deck/actions /root/compass-deck/compass/ && \
+ mv /root/compass-deck/api /root/compass-deck/compass/ && \
+ mv /root/compass-deck/apiclient /root/compass-deck/compass/ && \
+ mv /root/compass-deck/deployment /root/compass-deck/compass/ && \
+ mv /root/compass-deck/utils /root/compass-deck/compass/ && \
+ mv /root/compass-deck/db /root/compass-deck/compass/ && \
+ mv /root/compass-deck/tasks /root/compass-deck/compass/ && \
+ mv /root/compass-deck/log_analyzor /root/compass-deck/compass/
+
+# pip
+RUN easy_install --upgrade pip && \
+ pip install --upgrade pip && \
+ pip install --upgrade setuptools && \
+ pip install --upgrade virtualenv && \
+ pip install --upgrade redis && \
+ pip install --upgrade virtualenvwrapper
+
+# http
+RUN mkdir -p /var/log/httpd && \
+ chmod -R 777 /var/log/httpd
+
+# virtualenv
+RUN yum install -y which && \
+ source `which virtualenvwrapper.sh` && \
+ mkvirtualenv --system-site-packages compass-core && \
+ workon compass-core && \
+ cd /root/compass-deck && \
+ pip install -U -r requirements.txt
+
+# web
+RUN mkdir -p /var/www/compass_web/v2.5 && \
+ cp -rf /root/compass-web/v2.5/target/* /var/www/compass_web/v2.5/
+
+# compass-server
+RUN echo "ServerName compass-deck:80" >> /etc/httpd/conf/httpd.conf
+RUN mkdir -p /opt/compass/bin && \
+ mkdir -p /opt/compass/db
+ADD misc/apache/ods-server.conf /etc/httpd/conf.d/ods-server.conf
+ADD misc/apache/http_pip.conf /etc/httpd/conf.d/http_pip.conf
+ADD misc/apache/images.conf /etc/httpd/conf.d/images.conf
+ADD misc/apache/packages.conf /etc/httpd/conf.d/packages.conf
+#COPY conf /etc/compass
+ADD bin/* /opt/compass/bin/
+RUN mkdir -p /var/www/compass && \
+ ln -s -f /opt/compass/bin/compass_wsgi.py /var/www/compass/compass.wsgi && \
+ cp -rf /usr/lib64/libcrypto.so.6 /usr/lib64/libcrypto.so
+
+
+# install compass-deck code
+RUN mkdir -p /var/log/compass && \
+ chmod -R 777 /var/log/compass && \
+ chmod -R 777 /opt/compass/db && \
+ touch /root/compass-deck/compass/__init__.py && \
+ source `which virtualenvwrapper.sh` && \
+ workon compass-core && \
+ cd /root/compass-deck && \
+ python setup.py install && \
+ usermod -a -G root apache
+
+EXPOSE 80
+ADD start.sh /usr/local/bin/start.sh
+ENTRYPOINT ["/bin/bash", "-c"]
+CMD ["/usr/local/bin/start.sh"]
diff --git a/compass-deck/misc/adapter_changes/Debian.yml b/compass-deck/misc/adapter_changes/Debian.yml
new file mode 100644
index 0000000..0f76f75
--- /dev/null
+++ b/compass-deck/misc/adapter_changes/Debian.yml
@@ -0,0 +1,18 @@
+#############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+
+cron_path: "/var/spool/cron/crontabs"
+
+packages:
+ - keystone
+
+services:
+ - apache2
+ - keystone
diff --git a/compass-deck/misc/adapter_changes/HA-ansible-multinodes.yml b/compass-deck/misc/adapter_changes/HA-ansible-multinodes.yml
new file mode 100644
index 0000000..b0c5810
--- /dev/null
+++ b/compass-deck/misc/adapter_changes/HA-ansible-multinodes.yml
@@ -0,0 +1,239 @@
+---
+- hosts: all
+ remote_user: root
+ pre_tasks:
+ - name: make sure ssh dir exist
+ file:
+ path: '{{ item.path }}'
+ owner: '{{ item.owner }}'
+ group: '{{ item.group }}'
+ state: directory
+ mode: 0755
+ with_items:
+ - path: /root/.ssh
+ owner: root
+ group: root
+
+ - name: write ssh config
+ copy:
+ content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
+ dest: '{{ item.dest }}'
+ owner: '{{ item.owner }}'
+ group: '{{ item.group }}'
+ mode: 0600
+ with_items:
+ - dest: /root/.ssh/config
+ owner: root
+ group: root
+
+ - name: generate ssh keys
+ shell: if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "ssh key already generated!"; fi;
+
+ - name: fetch ssh keys
+ fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes
+
+ - authorized_key:
+ user: root
+ key: "{{ lookup('file', 'item') }}"
+ with_fileglob:
+ - /tmp/ssh-keys-*
+ max_fail_percentage: 0
+ roles:
+ - common
+
+- hosts: all
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - setup-network
+
+- hosts: ha
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - ha
+
+- hosts: controller
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - memcached
+ - apache
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - cinder-controller
+ - glance
+ - neutron-common
+ - neutron-network
+ - ceilometer_controller
+# - ext-network
+ - dashboard
+ - heat
+ - aodh
+
+- hosts: all
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - storage
+
+- hosts: compute
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - nova-compute
+ - neutron-compute
+ - cinder-volume
+ - ceilometer_compute
+
+- hosts: all
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - secgroup
+
+- hosts: ceph_adm
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles: []
+ # - ceph-deploy
+
+- hosts: ceph
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - ceph-purge
+ - ceph-config
+
+- hosts: ceph_mon
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - ceph-mon
+
+- hosts: ceph_osd
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - ceph-osd
+
+- hosts: ceph
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - ceph-openstack
+
+- hosts: all
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - monitor
+
+
+- hosts: all
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ tasks:
+ - name: set bash to nova
+ user:
+ name: nova
+ shell: /bin/bash
+
+ - name: make sure ssh dir exist
+ file:
+ path: '{{ item.path }}'
+ owner: '{{ item.owner }}'
+ group: '{{ item.group }}'
+ state: directory
+ mode: 0755
+ with_items:
+ - path: /var/lib/nova/.ssh
+ owner: nova
+ group: nova
+
+ - name: copy ssh keys for nova
+ shell: cp -rf /root/.ssh/id_rsa /var/lib/nova/.ssh;
+
+ - name: write ssh config
+ copy:
+ content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
+ dest: '{{ item.dest }}'
+ owner: '{{ item.owner }}'
+ group: '{{ item.group }}'
+ mode: 0600
+ with_items:
+ - dest: /var/lib/nova/.ssh/config
+ owner: nova
+ group: nova
+
+ - authorized_key:
+ user: nova
+ key: "{{ lookup('file', 'item') }}"
+ with_fileglob:
+ - /tmp/ssh-keys-*
+
+ - name: chown ssh file
+ shell: chown -R nova:nova /var/lib/nova/.ssh;
+
+
+- hosts: all
+ remote_user: root
+ max_fail_percentage: 0
+ roles:
+ - odl_cluster
+
+- hosts: all
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - onos_cluster
+
+- hosts: all
+ remote_user: root
+ sudo: True
+ max_fail_percentage: 0
+ roles:
+ - open-contrail
+
+- hosts: all
+ remote_user: root
+ serial: 1
+ max_fail_percentage: 0
+ roles:
+ - odl_cluster_neutron
+
+- hosts: all
+ remote_user: root
+ max_fail_percentage: 0
+ roles:
+ - odl_cluster_post
+
+- hosts: controller
+ remote_user: root
+ max_fail_percentage: 0
+ roles:
+ - ext-network
+
+- hosts: controller
+ remote_user: root
+ accelerate: false
+ max_fail_percentage: 0
+ roles:
+ - tacker
diff --git a/compass-deck/misc/adapter_changes/keystone_install.yml b/compass-deck/misc/adapter_changes/keystone_install.yml
new file mode 100644
index 0000000..01907c6
--- /dev/null
+++ b/compass-deck/misc/adapter_changes/keystone_install.yml
@@ -0,0 +1,74 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install keystone packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: packages | union(packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: generate keystone service list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: services | union(services_noarch)
+
+- name: delete sqlite database
+ file:
+ path: /var/lib/keystone/keystone.db
+ state: absent
+
+- name: update keystone conf
+ template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
+ notify:
+ - restart keystone services
+
+- name: update apache2 configs
+ template:
+ src: wsgi-keystone.conf.j2
+ dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
+ when: ansible_os_family == 'Debian'
+ notify:
+ - restart keystone services
+
+- name: update apache2 configs
+ template:
+ src: wsgi-keystone.conf.j2
+ dest: '{{ apache_config_dir }}/wsgi-keystone.conf'
+ when: ansible_os_family == 'RedHat'
+ notify:
+ - restart keystone services
+
+- name: enable keystone server
+ file:
+ src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
+ dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
+ state: "link"
+ when: ansible_os_family == 'Debian'
+ notify:
+ - restart keystone services
+
+- name: keystone source files
+ template: src={{ item }} dest=/opt/{{ item }}
+ with_items:
+ - admin-openrc.sh
+ - demo-openrc.sh
+
+- meta: flush_handlers
diff --git a/compass-deck/misc/adapter_changes/preseed_post_anamon_local b/compass-deck/misc/adapter_changes/preseed_post_anamon_local
new file mode 100644
index 0000000..c4f461f
--- /dev/null
+++ b/compass-deck/misc/adapter_changes/preseed_post_anamon_local
@@ -0,0 +1,80 @@
+#if $str($getVar('anamon_enabled','')) == "1"
+
+## install anamon script
+ #if $getVar("compass_server", "") != ""
+wget -O /usr/local/sbin/anamon "http://$compass_server:$http_port/cobbler/aux/anamon"
+ #else
+wget -O /usr/local/sbin/anamon "http://$server:$http_port/cobbler/aux/anamon"
+ #end if
+## install anamon system service
+cat << EOF > /etc/init.d/anamon.init
+#raw
+#!/bin/bash
+## BEGIN INIT INFO
+# Provides: anamon.init
+# Default-Start: 3 5
+# Default-Stop: 0 1 2 4 6
+# Required-Start: $network
+# Short-Description: Starts the cobbler anamon boot notification program
+# Description: anamon runs the first time a machine is booted after
+# installation.
+## END INIT INFO
+
+#
+# anamon.init: Starts the cobbler post-install boot notification program
+#
+# chkconfig: 35 95 95
+#
+# description: anamon runs the first time a machine is booted after
+# installation.
+#
+#end raw
+cd /var/log/installer
+gunzip initial-status.gz
+cd -
+#if $getVar("compass_server","") != ""
+/usr/local/sbin/anamon --watchfile "/var/log/installer/syslog /var/log/installer/hardware-summary /var/log/installer/initial-status /var/log/installer/status" --name $name --server $compass_server --port $http_port --exit
+#else
+/usr/local/sbin/anamon --watchfile "/var/log/installer/syslog /var/log/installer/hardware-summary /var/log/installer/initial-status /var/log/installer/status" --name $name --server $server --port $http_port --exit
+#end if
+update-rc.d -f anamon remove
+mv /etc/init.d/anamon.init /tmp/anamon.init
+EOF
+
+## adjust permissions
+chmod 755 /etc/init.d/anamon.init /usr/local/sbin/anamon
+test -d /selinux && restorecon /etc/init.d/anamon.init /usr/local/sbin/anamon
+
+## enable the script
+update-rc.d anamon.init defaults 95 95
+#end if
+
+## place start-up script for updating os state
+#if $getVar('compass_server', '') != ""
+ #set srv = $getVar('compass_server','')
+#else
+ #set srv = $getVar('server','')
+#end if
+cat << EOF > /etc/init.d/set_state
+#raw
+#!/bin/bash
+# Provides: set_state
+# Default-Start: 3 5
+# Default-Stop: 0 1 2 4 6
+# Required-Start: $network $ssh
+# Short-Description: Notifies the os installation is finished
+# Description: set_state runs the first time a machine is booted after
+# installation.
+#end raw
+wget -O /tmp/os_state --post-data='{"ready": true}' --header=Content-Type:application/json "http://$srv/api/hosts/${host_id}/state_internal"
+update-rc.d -f set_state remove
+mv /etc/init.d/set_state /tmp/set_state
+EOF
+
+## adjust permissions
+chmod 755 /etc/init.d/set_state
+test -d /selinux && restorecon /etc/init.d/set_state
+
+update-rc.d set_state defaults 99 99
+
+echo "compass_server=$server" >> /etc/compass.conf
diff --git a/compass-deck/misc/adapter_changes/preseed_post_anamon_remote b/compass-deck/misc/adapter_changes/preseed_post_anamon_remote
new file mode 100644
index 0000000..aae183a
--- /dev/null
+++ b/compass-deck/misc/adapter_changes/preseed_post_anamon_remote
@@ -0,0 +1,80 @@
+#if $str($getVar('anamon_enabled','')) == "1"
+
+## install anamon script
+ #if $getVar("compass_server", "") != ""
+wget -O /usr/local/sbin/anamon "http://$compass_server:$http_port/cobbler/aux/anamon"
+ #else
+wget -O /usr/local/sbin/anamon "http://$server:$http_port/cobbler/aux/anamon"
+ #end if
+## install anamon system service
+cat << EOF > /etc/init.d/anamon.init
+#raw
+#!/bin/bash
+## BEGIN INIT INFO
+# Provides: anamon.init
+# Default-Start: 3 5
+# Default-Stop: 0 1 2 4 6
+# Required-Start: $network
+# Short-Description: Starts the cobbler anamon boot notification program
+# Description: anamon runs the first time a machine is booted after
+# installation.
+## END INIT INFO
+
+#
+# anamon.init: Starts the cobbler post-install boot notification program
+#
+# chkconfig: 35 95 95
+#
+# description: anamon runs the first time a machine is booted after
+# installation.
+#
+#end raw
+cd /var/log/installer
+gunzip initial-status.gz
+cd -
+#if $getVar("compass_server","") != ""
+/usr/local/sbin/anamon --watchfile "/var/log/installer/syslog /var/log/installer/hardware-summary /var/log/installer/initial-status /var/log/installer/status" --name $name --server $compass_server --port $http_port --exit
+#else
+/usr/local/sbin/anamon --watchfile "/var/log/installer/syslog /var/log/installer/hardware-summary /var/log/installer/initial-status /var/log/installer/status" --name $name --server $server --port $http_port --exit
+#end if
+update-rc.d -f anamon remove
+mv /etc/init.d/anamon.init /tmp/anamon.init
+EOF
+
+## adjust permissions
+chmod 755 /etc/init.d/anamon.init /usr/local/sbin/anamon
+test -d /selinux && restorecon /etc/init.d/anamon.init /usr/local/sbin/anamon
+
+## enable the script
+update-rc.d anamon.init defaults 95 95
+#end if
+
+## place start-up script for updating os state
+#if $getVar('compass_server', '') != ""
+ #set srv = $getVar('compass_server','')
+#else
+ #set srv = $getVar('server','')
+#end if
+cat << EOF > /etc/init.d/set_state
+#raw
+#!/bin/bash
+# Provides: set_state
+# Default-Start: 3 5
+# Default-Stop: 0 1 2 4 6
+# Required-Start: $network $ssh
+# Short-Description: Notifies the os installation is finished
+# Description: set_state runs the first time a machine is booted after
+# installation.
+#end raw
+wget -O /tmp/os_state --post-data='{"ready": true}' --header=Content-Type:application/json "http://c.stack360.io/api/hosts/${host_id}/state_internal"
+update-rc.d -f set_state remove
+mv /etc/init.d/set_state /tmp/set_state
+EOF
+
+## adjust permissions
+chmod 755 /etc/init.d/set_state
+test -d /selinux && restorecon /etc/init.d/set_state
+
+update-rc.d set_state defaults 99 99
+
+echo "compass_server=$server" >> /etc/compass.conf
diff --git a/compass-deck/misc/apache/README b/compass-deck/misc/apache/README
new file mode 100644
index 0000000..73b883c
--- /dev/null
+++ b/compass-deck/misc/apache/README
@@ -0,0 +1,15 @@
+Apache2 is supported. mod_wsgi is required to run Compass web app.
+
+On Ubuntu systems (e.g, 12.04), you can install mod_wsgi with
+ sudo aptitude install libapache2-mod-wsgi
+
+The current wsgi config file assumes that the environment variable
+ODS_PROJECT_HOME is set in the /etc/apache2/envvars file. You also need
+to make sure the mod_rewrite module in Apache is enabled. If it is not,
+run the command: sudo a2enmod rewrite
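+
+For example, an envvars entry might look like this (adjust the path to
+your deployment):
+    export ODS_PROJECT_HOME=/opt/compass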
+
+We also assume that the server host name is ods-server.us.huawei.com.
+
+Restart Apache to get mod_wsgi to work.
+
+sudo service apache2 restart
diff --git a/compass-deck/misc/apache/cobbler_web.conf b/compass-deck/misc/apache/cobbler_web.conf
new file mode 100644
index 0000000..f03d4fe
--- /dev/null
+++ b/compass-deck/misc/apache/cobbler_web.conf
@@ -0,0 +1,10 @@
+# This configuration file enables the cobbler web
+# interface (django version)
+
+# Force everything to go to https
+RewriteEngine on
+RewriteCond %{HTTPS} off
+RewriteCond %{REQUEST_URI} ^/cobbler_web
+# RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI}
+
+WSGIScriptAlias /cobbler_web /usr/share/cobbler/web/cobbler.wsgi
diff --git a/compass-deck/misc/apache/http_pip.conf b/compass-deck/misc/apache/http_pip.conf
new file mode 100644
index 0000000..9a61de2
--- /dev/null
+++ b/compass-deck/misc/apache/http_pip.conf
@@ -0,0 +1,9 @@
+Alias /pip /var/www/pip
+
+<Directory "/var/www/pip">
+ SetEnv VIRTUALENV
+ Options Indexes FollowSymLinks
+ Order allow,deny
+ Allow from all
+</Directory>
+
diff --git a/compass-deck/misc/apache/images.conf b/compass-deck/misc/apache/images.conf
new file mode 100644
index 0000000..d38986a
--- /dev/null
+++ b/compass-deck/misc/apache/images.conf
@@ -0,0 +1,9 @@
+Alias /image /var/www/guestimg
+
+<Directory "/var/www/guestimg">
+ SetEnv VIRTUALENV
+ Options Indexes FollowSymLinks
+ Order allow,deny
+ Allow from all
+</Directory>
+
diff --git a/compass-deck/misc/apache/ods-server.conf b/compass-deck/misc/apache/ods-server.conf
new file mode 100644
index 0000000..a773777
--- /dev/null
+++ b/compass-deck/misc/apache/ods-server.conf
@@ -0,0 +1,18 @@
+# Apache config for ods server
+#
+# Specify python path if you use virtualenv
+
+WSGIDaemonProcess compass threads=4 display-name=%{GROUP}
+WSGIProcessGroup compass
+WSGIScriptAlias /api /var/www/compass/compass.wsgi
+WSGISocketPrefix /var/run/wsgi
+
+<VirtualHost *:80>
+ DocumentRoot /var/www/compass_web/v2.5
+
+ <Directory "/var/www/compass_web/v2.5">
+ Options Indexes FollowSymLinks
+ Order allow,deny
+ Allow from all
+ </Directory>
+</VirtualHost>
diff --git a/compass-deck/misc/apache/packages.conf b/compass-deck/misc/apache/packages.conf
new file mode 100644
index 0000000..0934fcd
--- /dev/null
+++ b/compass-deck/misc/apache/packages.conf
@@ -0,0 +1,9 @@
+Alias /packages /var/www/packages
+
+<Directory "/var/www/packages">
+ SetEnv VIRTUALENV
+ Options Indexes FollowSymLinks
+ Order allow,deny
+ Allow from all
+</Directory>
+
diff --git a/compass-deck/misc/apache/ssl.conf b/compass-deck/misc/apache/ssl.conf
new file mode 100644
index 0000000..703f97d
--- /dev/null
+++ b/compass-deck/misc/apache/ssl.conf
@@ -0,0 +1,221 @@
+#
+# This is the Apache server configuration file providing SSL support.
+# It contains the configuration directives to instruct the server how to
+# serve pages over an https connection. For detailing information about these
+# directives see <URL:http://httpd.apache.org/docs/2.2/mod/mod_ssl.html>
+#
+# Do NOT simply read the instructions in here without understanding
+# what they do. They're here only as hints or reminders. If you are unsure
+# consult the online docs. You have been warned.
+#
+
+LoadModule ssl_module modules/mod_ssl.so
+
+#
+# When we also provide SSL we have to listen to the
+# the HTTPS port in addition.
+#
+Listen 445
+
+##
+## SSL Global Context
+##
+## All SSL configuration in this context applies both to
+## the main server and all SSL-enabled virtual hosts.
+##
+
+# Pass Phrase Dialog:
+# Configure the pass phrase gathering process.
+# The filtering dialog program (`builtin' is an internal
+# terminal dialog) has to provide the pass phrase on stdout.
+SSLPassPhraseDialog builtin
+
+# Inter-Process Session Cache:
+# Configure the SSL Session Cache: First the mechanism
+# to use and second the expiring timeout (in seconds).
+SSLSessionCache shmcb:/var/cache/mod_ssl/scache(512000)
+SSLSessionCacheTimeout 300
+
+# Semaphore:
+# Configure the path to the mutual exclusion semaphore the
+# SSL engine uses internally for inter-process synchronization.
+
+# Pseudo Random Number Generator (PRNG):
+# Configure one or more sources to seed the PRNG of the
+# SSL library. The seed data should be of good random quality.
+# WARNING! On some platforms /dev/random blocks if not enough entropy
+# is available. This means you then cannot use the /dev/random device
+# because it would lead to very long connection times (as long as
+# it requires to make more entropy available). But usually those
+# platforms additionally provide a /dev/urandom device which doesn't
+# block. So, if available, use this one instead. Read the mod_ssl User
+# Manual for more details.
+SSLRandomSeed startup file:/dev/urandom 256
+SSLRandomSeed connect builtin
+#SSLRandomSeed startup file:/dev/random 512
+#SSLRandomSeed connect file:/dev/random 512
+#SSLRandomSeed connect file:/dev/urandom 512
+
+#
+# Use "SSLCryptoDevice" to enable any supported hardware
+# accelerators. Use "openssl engine -v" to list supported
+# engine names. NOTE: If you enable an accelerator and the
+# server does not start, consult the error logs and ensure
+# your accelerator is functioning properly.
+#
+SSLCryptoDevice builtin
+#SSLCryptoDevice ubsec
+
+##
+## SSL Virtual Host Context
+##
+
+<VirtualHost _default_:445>
+
+# General setup for the virtual host, inherited from global configuration
+#DocumentRoot "/var/www/html"
+#ServerName www.example.com:443
+
+# Use separate log files for the SSL virtual host; note that LogLevel
+# is not inherited from httpd.conf.
+ErrorLog logs/ssl_error_log
+TransferLog logs/ssl_access_log
+LogLevel warn
+
+# SSL Engine Switch:
+# Enable/Disable SSL for this virtual host.
+SSLEngine on
+
+# SSL Protocol support:
+# List the enable protocol levels with which clients will be able to
+# connect. Disable SSLv2 access by default:
+SSLProtocol all -SSLv2
+
+# SSL Cipher Suite:
+# List the ciphers that the client is permitted to negotiate.
+# See the mod_ssl documentation for a complete list.
+SSLCipherSuite ALL:!ADH:!EXPORT:!SSLv2:RC4+RSA:+HIGH:+MEDIUM:+LOW
+
+# Server Certificate:
+# Point SSLCertificateFile at a PEM encoded certificate. If
+# the certificate is encrypted, then you will be prompted for a
+# pass phrase. Note that a kill -HUP will prompt again. A new
+# certificate can be generated using the genkey(1) command.
+SSLCertificateFile /etc/pki/tls/certs/localhost.crt
+
+# Server Private Key:
+# If the key is not combined with the certificate, use this
+# directive to point at the key file. Keep in mind that if
+# you've both a RSA and a DSA private key you can configure
+# both in parallel (to also allow the use of DSA ciphers, etc.)
+SSLCertificateKeyFile /etc/pki/tls/private/localhost.key
+
+# Server Certificate Chain:
+# Point SSLCertificateChainFile at a file containing the
+# concatenation of PEM encoded CA certificates which form the
+# certificate chain for the server certificate. Alternatively
+# the referenced file can be the same as SSLCertificateFile
+# when the CA certificates are directly appended to the server
+# certificate for convenience.
+#SSLCertificateChainFile /etc/pki/tls/certs/server-chain.crt
+
+# Certificate Authority (CA):
+# Set the CA certificate verification path where to find CA
+# certificates for client authentication or alternatively one
+# huge file containing all of them (file must be PEM encoded)
+#SSLCACertificateFile /etc/pki/tls/certs/ca-bundle.crt
+
+# Client Authentication (Type):
+# Client certificate verification type and depth. Types are
+# none, optional, require and optional_no_ca. Depth is a
+# number which specifies how deeply to verify the certificate
+# issuer chain before deciding the certificate is not valid.
+#SSLVerifyClient require
+#SSLVerifyDepth 10
+
+# Access Control:
+# With SSLRequire you can do per-directory access control based
+# on arbitrary complex boolean expressions containing server
+# variable checks and other lookup directives. The syntax is a
+# mixture between C and Perl. See the mod_ssl documentation
+# for more details.
+#<Location />
+#SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)/ \
+# and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \
+# and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \
+# and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \
+# and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \
+# or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/
+#</Location>
+
+# SSL Engine Options:
+# Set various options for the SSL engine.
+# o FakeBasicAuth:
+# Translate the client X.509 into a Basic Authorisation. This means that
+# the standard Auth/DBMAuth methods can be used for access control. The
+# user name is the `one line' version of the client's X.509 certificate.
+# Note that no password is obtained from the user. Every entry in the user
+# file needs this password: `xxj31ZMTZzkVA'.
+# o ExportCertData:
+# This exports two additional environment variables: SSL_CLIENT_CERT and
+# SSL_SERVER_CERT. These contain the PEM-encoded certificates of the
+# server (always existing) and the client (only existing when client
+# authentication is used). This can be used to import the certificates
+# into CGI scripts.
+# o StdEnvVars:
+# This exports the standard SSL/TLS related `SSL_*' environment variables.
+# Per default this exportation is switched off for performance reasons,
+# because the extraction step is an expensive operation and is usually
+# useless for serving static content. So one usually enables the
+# exportation for CGI and SSI requests only.
+# o StrictRequire:
+# This denies access when "SSLRequireSSL" or "SSLRequire" applied even
+# under a "Satisfy any" situation, i.e. when it applies access is denied
+# and no other module can change it.
+# o OptRenegotiate:
+# This enables optimized SSL connection renegotiation handling when SSL
+# directives are used in per-directory context.
+#SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire
+<Files ~ "\.(cgi|shtml|phtml|php3?)$">
+ SSLOptions +StdEnvVars
+</Files>
+<Directory "/var/www/cgi-bin">
+ SSLOptions +StdEnvVars
+</Directory>
+
+# SSL Protocol Adjustments:
+# The safe and default but still SSL/TLS standard compliant shutdown
+# approach is that mod_ssl sends the close notify alert but doesn't wait for
+# the close notify alert from client. When you need a different shutdown
+# approach you can use one of the following variables:
+# o ssl-unclean-shutdown:
+# This forces an unclean shutdown when the connection is closed, i.e. no
+# SSL close notify alert is sent or allowed to be received. This violates
+# the SSL/TLS standard but is needed for some brain-dead browsers. Use
+# this when you receive I/O errors because of the standard approach where
+# mod_ssl sends the close notify alert.
+# o ssl-accurate-shutdown:
+# This forces an accurate shutdown when the connection is closed, i.e. a
+# SSL close notify alert is sent and mod_ssl waits for the close notify
+# alert of the client. This is 100% SSL/TLS standard compliant, but in
+# practice often causes hanging connections with brain-dead browsers. Use
+# this only for browsers where you know that their SSL implementation
+# works correctly.
+# Notice: Most problems of broken clients are also related to the HTTP
+# keep-alive facility, so you usually additionally want to disable
+# keep-alive for those clients, too. Use variable "nokeepalive" for this.
+# Similarly, one has to force some clients to use HTTP/1.0 to workaround
+# their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and
+# "force-response-1.0" for this.
+SetEnvIf User-Agent ".*MSIE.*" \
+ nokeepalive ssl-unclean-shutdown \
+ downgrade-1.0 force-response-1.0
+
+# Per-Server Logging:
+# The home of a custom SSL log file. Use this when you want a
+# compact non-error SSL logfile on a virtual host basis.
+CustomLog logs/ssl_request_log \
+ "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b"
+
+</VirtualHost>
+
diff --git a/compass-deck/misc/chef-server/chef-server.rb b/compass-deck/misc/chef-server/chef-server.rb
new file mode 100644
index 0000000..f7956a6
--- /dev/null
+++ b/compass-deck/misc/chef-server/chef-server.rb
@@ -0,0 +1,4 @@
+nginx['non_ssl_port'] = 8080
+nginx['enable_non_ssl'] = true
+nginx['ssl_port'] = 443
+nginx['url'] = "https://#{node['fqdn']}"
diff --git a/compass-deck/misc/ci/prepare_node_compass.sh b/compass-deck/misc/ci/prepare_node_compass.sh
new file mode 100755
index 0000000..b9c40db
--- /dev/null
+++ b/compass-deck/misc/ci/prepare_node_compass.sh
@@ -0,0 +1,28 @@
+#!/bin/bash -x
+echo 0 > /selinux/enforce
+yum clean all
+yum -y update --skip-broken
+yum install -y virt-install libvirt qemu-kvm figlet rsyslog logrotate iproute openssh-clients python git wget python-setuptools python-netaddr python-flask python-flask-sqlalchemy python-amqplib amqp python-paramiko python-mock dhcp bind rsync yum-utils xinetd tftp-server gcc net-snmp-utils net-snmp net-snmp-python python-daemon unzip openssl openssl098e createrepo mkisofs python-cheetah python-simplejson python-urlgrabber PyYAML Django cman debmirror pykickstart libxml2-devel libxslt-devel python-devel sshpass bc
+service libvirtd start
+sed -i "s/Defaults requiretty/#Defaults requiretty/" /etc/sudoers
+brctl show |grep installation > /dev/null
+if [[ $? -eq 0 ]] ; then
+ echo "bridge already exists"
+else
+ brctl addbr installation
+ brctl addif installation eth1
+ ifconfig eth1 up
+ dhclient -r eth1
+ dhclient -r installation
+ dhclient installation
+fi
+git clone http://git.openstack.org/openstack/compass-core -b dev/experimental ||exit $?
+cd compass-core
+source install/install.conf.template
+source install/install.conf
+source install/setup_env.sh
+source install/dependency.sh
+source install/prepare.sh
+sync
+sleep 5
+echo "image preparation done"
diff --git a/compass-deck/misc/ci/pxe-deploy.sh b/compass-deck/misc/ci/pxe-deploy.sh
new file mode 100755
index 0000000..545c95a
--- /dev/null
+++ b/compass-deck/misc/ci/pxe-deploy.sh
@@ -0,0 +1,14 @@
+#!/bin/bash -xe
+ln -s /var/log/cobbler/anamon cobbler_logs
+ln -s /var/log/compass compass_logs
+ln -s /var/log/chef chef_logs
+cp compass-core/compass/apiclient/example.py /tmp/test.py
+chmod +x /tmp/test.py
+virsh destroy pxe01
+virsh start pxe01
+virsh list
+source compass-core/install/install.conf.template
+/usr/bin/python /tmp/test.py
+if [ "$tempest" == "true" ]; then
+ ./tempest_run.sh
+fi
diff --git a/compass-deck/misc/ci/pxe-prepare.sh b/compass-deck/misc/ci/pxe-prepare.sh
new file mode 100755
index 0000000..08f5eb3
--- /dev/null
+++ b/compass-deck/misc/ci/pxe-prepare.sh
@@ -0,0 +1,29 @@
+#!/bin/bash -x
+if [[ ! -e /tmp/pxe01.raw ]]; then
+ qemu-img create -f raw /tmp/pxe01.raw 20G
+else
+ rm -rf /tmp/pxe01.raw
+ qemu-img create -f raw /tmp/pxe01.raw 20G
+fi
+virsh list |grep pxe01
+vmrc=$?
+if [[ $vmrc -eq 0 ]] ; then
+ virsh destroy pxe01
+ virsh undefine pxe01
+else
+ echo "no legacy pxe vm found"
+fi
+virt-install --accelerate --hvm --connect qemu:///system \
+ --network=bridge:installation,mac=00:11:20:30:40:01 --pxe \
+ --network=network:default \
+ --name pxe01 --ram=8192 \
+ --disk /tmp/pxe01.raw,format=raw \
+ --vcpus=10 \
+ --graphics vnc,listen=0.0.0.0 --noautoconsole \
+ --os-type=linux --os-variant=rhel6
+rm -rf switch-file
+echo "machine,10.145.81.220,5,1,00:11:20:30:40:01" > switch-file
+echo "switch,10.145.81.220,huawei,v2c,public,under_monitoring" >> switch-file
+/usr/bin/python /opt/compass/bin/manage_db.py set_switch_machines --switch_machines_file switch-file
+/usr/bin/python /opt/compass/bin/manage_db.py clean_clusters
+/usr/bin/python /opt/compass/bin/manage_db.py clean_installation_progress
diff --git a/compass-deck/misc/ci/tempest_run.sh b/compass-deck/misc/ci/tempest_run.sh
new file mode 100755
index 0000000..7ac2212
--- /dev/null
+++ b/compass-deck/misc/ci/tempest_run.sh
@@ -0,0 +1,81 @@
+#!/bin/bash -xe
+# Determine if the given option is present in the INI file
+# ini_has_option config-file section option
+function ini_has_option {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+ local file=$1
+ local section=$2
+ local option=$3
+ local line
+ line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+ $xtrace
+ [ -n "$line" ]
+}
+# Set an option in an INI file
+# iniset config-file section option value
+function iniset {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+ local file=$1
+ local section=$2
+ local option=$3
+ local value=$4
+
+ [[ -z $section || -z $option ]] && return
+
+ if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
+ # Add section at the end
+ echo -e "\n[$section]" >>"$file"
+ fi
+ if ! ini_has_option "$file" "$section" "$option"; then
+ # Add it
+ sed -i -e "/^\[$section\]/ a\\
+$option = $value
+" "$file"
+ else
+ local sep=$(echo -ne "\x01")
+ # Replace it
+ sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
+ fi
+ $xtrace
+}
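+# Example usage (illustrative values):
+#     iniset /etc/tempest.conf compute image_ref $image_id
+#     if ini_has_option /etc/tempest.conf compute image_ref; then echo present; fi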
+#Install prerequisites for Tempest
+pip install tox==1.6.1
+#Install setuptools twice so that it is really upgraded
+pip install -U setuptools
+pip install -U setuptools
+pip install -U virtualenvwrapper
+yum install -y libxml2-devel libxslt-devel python-devel sshpass
+if [[ ! -e /tmp/tempest ]]; then
+ git clone http://git.openstack.org/openstack/tempest /tmp/tempest
+ cd /tmp/tempest
+else
+ cd /tmp/tempest
+ git remote set-url origin http://git.openstack.org/openstack/tempest
+ git remote update
+ git reset --hard
+ git clean -x -f -d -q
+ git checkout remotes/origin/master
+fi
+source `which virtualenvwrapper.sh`
+set +e
+if ! lsvirtualenv |grep tempest>/dev/null; then
+ mkvirtualenv tempest
+ workon tempest
+else
+ workon tempest
+fi
+set -e
+cd /tmp/tempest
+#Install Tempest including dependencies
+pip install -e .
+nova_api_host=$(knife search node 'roles:os-compute-api' | grep 'IP:' | awk '{print $2}' | head -1)
+sshpass -p 'root' scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r root@$nova_api_host:/root/openrc /root/.
+source /root/openrc
+# wait for nova-compute neutron-agent and cinder-volume to report health
+# In some scenarios, nova-compute is up before conductor and has to retry
+# to register to conductor and there is some wait time between retries.
+timeout 180s sh -c "while ! nova service-list --binary nova-compute | grep 'enabled.*\ up\ '; do sleep 3; done"
+timeout 180s sh -c '''while ! neutron agent-list -f csv -c alive -c agent_type -c host | grep "\":-).*Open vSwitch agent.*\"" ; do sleep 3; done'''
+timeout 180s sh -c "cinder service-list --binary cinder-volume | grep 'enabled.*\ up\ '"
diff --git a/compass-deck/misc/ci/test-install.sh b/compass-deck/misc/ci/test-install.sh
new file mode 100755
index 0000000..22fd5d4
--- /dev/null
+++ b/compass-deck/misc/ci/test-install.sh
@@ -0,0 +1,22 @@
+#!/bin/bash -x
+# create a bridge named 'installation' so that compass and pxeboot vm are in the
+# same l2 network.
+brctl show |grep installation > /dev/null
+if [[ $? -eq 0 ]] ; then
+ echo "bridge already exists"
+else
+ brctl addbr installation
+ brctl addif installation eth1
+fi
+
+ifconfig installation 172.16.0.1 broadcast 172.16.0.0 netmask 255.255.0.0 up
+ifconfig eth1 up
+
+# kill the dhcp service started by libvirt to avoid conflict with dhcpd
+killall dnsmasq
+source compass-core/install/install.conf.template
+/bin/bash -x compass-core/install/install.sh || exit $?
+# echo "cache_peer 10.145.81.137 parent 3128 3130 default" >> /etc/squid/squid.conf
+# service squid restart
+# service squid status |grep running || exit $?
+# sleep 5
diff --git a/compass-deck/misc/compass_install.repo b/compass-deck/misc/compass_install.repo
new file mode 100644
index 0000000..6b97ed0
--- /dev/null
+++ b/compass-deck/misc/compass_install.repo
@@ -0,0 +1,5 @@
+[compass_install]
+name=compass_repo
+baseurl=http://192.168.104.2:9999/download/compass_install/centos7/
+gpgcheck=0
+enabled=1
diff --git a/compass-deck/misc/hosts b/compass-deck/misc/hosts
new file mode 100644
index 0000000..8b39e25
--- /dev/null
+++ b/compass-deck/misc/hosts
@@ -0,0 +1,3 @@
+127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
+$ipaddr $hostname
diff --git a/compass-deck/misc/logrotate.d/httpd b/compass-deck/misc/logrotate.d/httpd
new file mode 100644
index 0000000..794b9d7
--- /dev/null
+++ b/compass-deck/misc/logrotate.d/httpd
@@ -0,0 +1,9 @@
+/var/log/httpd/*log {
+ missingok
+ notifempty
+ sharedscripts
+ delaycompress
+ postrotate
+ /sbin/service httpd reload > /dev/null 2>/dev/null || true
+ endscript
+}
diff --git a/compass-deck/misc/logrotate.d/ntp b/compass-deck/misc/logrotate.d/ntp
new file mode 100644
index 0000000..6b290d5
--- /dev/null
+++ b/compass-deck/misc/logrotate.d/ntp
@@ -0,0 +1,9 @@
+/var/log/ntp.log {
+ missingok
+ notifempty
+ sharedscripts
+ delaycompress
+ postrotate
+ /sbin/service ntpd reload > /dev/null 2>/dev/null || true
+ endscript
+}
diff --git a/compass-deck/misc/logrotate.d/squid b/compass-deck/misc/logrotate.d/squid
new file mode 100644
index 0000000..1191d23
--- /dev/null
+++ b/compass-deck/misc/logrotate.d/squid
@@ -0,0 +1,9 @@
+/var/log/squid/*log {
+ missingok
+ notifempty
+ sharedscripts
+ delaycompress
+ postrotate
+ /sbin/service squid reload > /dev/null 2>/dev/null || true
+ endscript
+}
diff --git a/compass-deck/misc/logrotate.d/syslog b/compass-deck/misc/logrotate.d/syslog
new file mode 100644
index 0000000..db907ed
--- /dev/null
+++ b/compass-deck/misc/logrotate.d/syslog
@@ -0,0 +1,13 @@
+/var/log/cron
+/var/log/maillog
+/var/log/messages
+/var/log/secure
+/var/log/spooler
+/var/log/dhcpd.log
+/var/log/tftpd.log
+{
+ sharedscripts
+ postrotate
+ /bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
+ endscript
+}
diff --git a/compass-deck/misc/logrotate.d/yum b/compass-deck/misc/logrotate.d/yum
new file mode 100644
index 0000000..e587f96
--- /dev/null
+++ b/compass-deck/misc/logrotate.d/yum
@@ -0,0 +1,7 @@
+/var/log/yum.log {
+ missingok
+ notifempty
+ size 30k
+ yearly
+ create 0600 root root
+}
diff --git a/compass-deck/misc/logstash-forwarder/logstash-forwarder.conf b/compass-deck/misc/logstash-forwarder/logstash-forwarder.conf
new file mode 100644
index 0000000..d0cffeb
--- /dev/null
+++ b/compass-deck/misc/logstash-forwarder/logstash-forwarder.conf
@@ -0,0 +1,57 @@
+{
+ # The network section covers network configuration :)
+ "network": {
+ # A list of downstream servers listening for our messages.
+ # logstash-forwarder will pick one at random and only switch if
+ # the selected one appears to be dead or unresponsive
+ "servers": [ "www.stack360.io:5000" ],
+
+ # The path to your client ssl certificate (optional)
+ #"ssl certificate": "./logstash-forwarder.crt",
+ # The path to your client ssl key (optional)
+ #"ssl key": "./logstash-forwarder.key",
+
+ # The path to your trusted ssl CA file. This is used
+ # to authenticate your downstream server.
+ "ssl ca": "/etc/pki/tls/certs/logstash-forwarder.crt",
+
+ # Network timeout in seconds. This is most important for
+ # logstash-forwarder determining whether to stop waiting for an
+ # acknowledgement from the downstream server. If a timeout is reached,
+ # logstash-forwarder will assume the connection or server is bad and
+ # will connect to a server chosen at random from the servers list.
+ "timeout": 15
+ },
+
+ # The list of files configurations
+ "files": [
+ {
+ "paths": [
+ "/var/log/compass/celery.log"
+ ]
+ }
+ # An array of hashes. Each hash tells what paths to watch and
+ # what fields to annotate on events from those paths.
+ #{
+ #"paths": [
+ # single paths are fine
+ #"/var/log/messages",
+ # globs are fine too, they will be periodically evaluated
+ # to see if any new files match the wildcard.
+ #"/var/log/*.log"
+ #],
+
+ # A dictionary of fields to annotate on each event.
+ #"fields": { "type": "syslog" }
+ #}, {
+ # A path of "-" means stdin.
+ #"paths": [ "-" ],
+ #"fields": { "type": "stdin" }
+ #}, {
+ #"paths": [
+ #"/var/log/apache/httpd-*.log"
+ #],
+ #"fields": { "type": "apache" }
+ #}
+ ]
+}
diff --git a/compass-deck/misc/logstash-forwarder/logstash-forwarder.crt b/compass-deck/misc/logstash-forwarder/logstash-forwarder.crt
new file mode 100644
index 0000000..90f66b1
--- /dev/null
+++ b/compass-deck/misc/logstash-forwarder/logstash-forwarder.crt
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIFAzCCAuugAwIBAgIJAKrryFncVfJNMA0GCSqGSIb3DQEBCwUAMBgxFjAUBgNV
+BAMMDSouc3RhY2szNjAuaW8wHhcNMTYwODE4MTcwNzEyWhcNMjYwODE2MTcwNzEy
+WjAYMRYwFAYDVQQDDA0qLnN0YWNrMzYwLmlvMIICIjANBgkqhkiG9w0BAQEFAAOC
+Ag8AMIICCgKCAgEAw4p1OVw8tMeYfk9828FiRLPhYWwHD2OCVwlKr8P3bl974I/P
+PhYTkrjEEe5SDYCWNcO58MxJ5X1vB2uQGNBBUFMni+KOqbVvcbPLL4Mkf8pjLdzD
+2pItE1X7UQ8p1iYBEyAnLoV4MjA7CJ4MmUtOOcCb6keHWEhyJFOj9IzkTjFCbKuL
+Y0paxayQOFlkuEK2d3Aa2HXgTLh3FFUX3kRqOcGg1uxEs2awf0nwP/NwcyfAgTSe
+6yqVjnb3GLYdy283FqvxW1MrZS9UAxp/tAXRgMT8I9L2wSrjnPmrabBv5I+Kuf0p
+EmLmxedOCTQsRYtYGL7TmySYgGuYtt/4UNKaG9tCPBnRXuMzlRClBDIoDhwLEKzC
+LlE8JKyClNgutMKcrHbWlEJiGxooyDC1H9gwkHTi3w7qF9BYYBhkXFN2Sl0mPk0m
+6NMfhQUAeqI0HlOsAX/HLwSWyRl0Nr2rvVJqgbbHRF18pADitQEuc/koT9qhON5f
+BReYhQZIarwPJ/UBgwgadrP79bvWJ5u9Oga6H7yHf49/UYD3gQCvu3/Hxo/IY6AX
+86eZg1ZObD6J0xpWb5jskVSgugar6Xq/h6iRkX8O3ssUdIyIxsIMhtCgxykSmdQY
+FDEIWGZq2kdEVvF6GR/EYJRxvASI+27TXzuxP1UodQQOXa8xySkKu9U5SqMCAwEA
+AaNQME4wHQYDVR0OBBYEFPLilPT6k6rRlxv9kCTW38VYy5AuMB8GA1UdIwQYMBaA
+FPLilPT6k6rRlxv9kCTW38VYy5AuMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggIBAAdDy+R0bGCBQgiF2fGunl+AZ8zd0MZbtL4Kv53gunlhyaPQJ3Z+e3GF
+V+Z9BhvMUXE/XN3bsAIZYJekvclysYLBdK1C8n9Rli+AbBSGjwgttRAXeEqaZCbE
+QrNPukRgHThv6hyJNcf1TnR70xCBlcYOGQkEqWx1g0xrsG8ryGbum0BAG0YWLCYq
+BboP16FGAPjDlb10ysWy7HuGJorf470Kyb2iRfp4PX/zdYbcA8gcggGCT323JwuD
+Qwnd8kyX6+6pNhBJE3gAyPDhqvbFbpmo/Ia5pqmJkL3APee1bVI2itqZB/HnmyZ5
+UCIzqf4uAuEodN6yXiImHt0TagXtL4eEXNE2qjwsFSaHeyP6iXPX5tc8RMHzFQjH
+I3MzROHaeVUxs2rMxETGvq4+DebhGzCBKaJUWfMV6Y1+ovrE0MowcR8nO1Q4YQt3
+to+W5IrjI7zzQ7+4XqZm+Yz1DQ4Kr7s2iyAExvkq7kU5FAow6SLPIaOl7kbas2M4
+fwFisuEQT2Om+hbWWZTJ1T45KU5NjznkPqJZ9dCdyqs2mH7BE4vOkOULq81uFG06
+VnJHcO+wZM1iCLa1hy7F2S4fDTjTBYDcuD5GFkulFLeFH5X7zoPz20OGTRMXZONI
+CHEk1ibp8j/Q6bw1zd0jGm3KDUSx+0/Avfve/e28U4KAdNekrgQf
+-----END CERTIFICATE-----
diff --git a/compass-deck/misc/logstash-forwarder/logstash-forwarder.repo b/compass-deck/misc/logstash-forwarder/logstash-forwarder.repo
new file mode 100644
index 0000000..27d68ba
--- /dev/null
+++ b/compass-deck/misc/logstash-forwarder/logstash-forwarder.repo
@@ -0,0 +1,6 @@
+[logstashforwarder]
+name=logstashforwarder repository
+baseurl=http://packages.elasticsearch.org/logstashforwarder/centos
+gpgcheck=1
+gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
+enabled=1
diff --git a/compass-deck/misc/ntp/ntp.conf b/compass-deck/misc/ntp/ntp.conf
new file mode 100644
index 0000000..e1572f3
--- /dev/null
+++ b/compass-deck/misc/ntp/ntp.conf
@@ -0,0 +1,60 @@
+# For more information about this file, see the man pages
+# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
+
+driftfile /var/lib/ntp/drift
+
+logfile /var/log/ntp.log
+
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+restrict default kod nomodify notrap nopeer noquery
+restrict -6 default kod nomodify notrap nopeer noquery
+
+# Permit all access over the loopback interface. This could
+# be tightened as well, but to do so would effect some of
+# the administrative functions.
+restrict 127.0.0.1
+restrict -6 ::1
+
+# Hosts on local network are less restricted.
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
+
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+server 0.centos.pool.ntp.org iburst
+server 1.centos.pool.ntp.org iburst
+server 2.centos.pool.ntp.org iburst
+server 3.centos.pool.ntp.org iburst
+
+#broadcast 192.168.1.255 autokey # broadcast server
+#broadcastclient # broadcast client
+#broadcast 224.0.1.1 autokey # multicast server
+#multicastclient 224.0.1.1 # multicast client
+#manycastserver 239.255.254.254 # manycast server
+#manycastclient 239.255.254.254 autokey # manycast client
+
+# Undisciplined Local Clock. This is a fake driver intended for backup
+# and when no outside source of synchronized time is available.
+server 127.127.1.0 # local clock
+#fudge 127.127.1.0 stratum 10
+
+# Enable public key cryptography.
+#crypto
+
+includefile /etc/ntp/crypto/pw
+
+# Key file containing the keys and key identifiers used when operating
+# with symmetric key cryptography.
+keys /etc/ntp/keys
+
+# Specify the key identifiers which are trusted.
+#trustedkey 4 8 42
+
+# Specify the key identifier to use with the ntpdc utility.
+#requestkey 8
+
+# Specify the key identifier to use with the ntpq utility.
+#controlkey 8
+
+# Enable writing of statistics records.
+#statistics clockstats cryptostats loopstats peerstats
diff --git a/compass-deck/misc/rsync b/compass-deck/misc/rsync
new file mode 100644
index 0000000..1f8b9b1
--- /dev/null
+++ b/compass-deck/misc/rsync
@@ -0,0 +1,14 @@
+# default: off
+# description: The rsync server is a good addition to an ftp server, as it \
+# allows crc checksumming etc.
+service rsync
+{
+ disable = no
+ flags = IPv6
+ socket_type = stream
+ wait = no
+ user = root
+ server = /usr/bin/rsync
+ server_args = --daemon
+ log_on_failure += USERID
+}
diff --git a/compass-deck/misc/rsyslog/rsyslog.conf b/compass-deck/misc/rsyslog/rsyslog.conf
new file mode 100644
index 0000000..306108b
--- /dev/null
+++ b/compass-deck/misc/rsyslog/rsyslog.conf
@@ -0,0 +1,97 @@
+# rsyslog v5 configuration file
+
+# For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
+# If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html
+
+#### MODULES ####
+
+$ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
+$ModLoad imklog # provides kernel logging support (previously done by rklogd)
+#$ModLoad immark # provides --MARK-- message capability
+
+# Provides UDP syslog reception
+#$ModLoad imudp
+#$UDPServerRun 514
+
+# Provides TCP syslog reception
+$ModLoad imtcp
+$InputTCPServerRun 514
+
+
+$WorkDirectory /var/lib/rsyslog
+
+# Added for chef logfiles
+$template Chef_log,"/var/log/chef/%syslogtag%/chef-client.log"
+$template Raw, "%rawmsg%"
+$template CustomLog, "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n"
+$template Chef_Openstack_log, "/var/log/chef/%syslogtag%/%programname%.log"
+#### GLOBAL DIRECTIVES ####
+
+# Use default timestamp format
+$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
+
+# File syncing capability is disabled by default. This feature is usually not required,
+# not useful and an extreme performance hit
+#$ActionFileEnableSync on
+
+# Include all config files in /etc/rsyslog.d/
+$IncludeConfig /etc/rsyslog.d/*.conf
+
+
+#### RULES ####
+
+# Log all kernel messages to the console.
+# Logging much else clutters up the screen.
+#kern.* /dev/console
+
+# Log anything (except mail) of level info or higher.
+# Don't log private authentication messages!
+syslog.*,daemon.* /var/log/messages
+
+# The authpriv file has restricted access.
+authpriv.* /var/log/secure
+
+# Log all the mail messages in one place.
+mail.* -/var/log/maillog
+
+
+# Log cron stuff
+cron.* /var/log/cron
+
+# Log dhcpd
+local6.* /var/log/dhcpd.log
+
+# Log tftpd
+local5.* /var/log/tftpd.log
+
+# Everybody gets emergency messages
+*.emerg *
+
+# Save news errors of level crit and higher in a special file.
+uucp,news.crit /var/log/spooler
+
+# Save boot messages also to boot.log
+local7.* /var/log/boot.log
+
+
+local3.* -?Chef_log
+
+local4.* -?Chef_Openstack_log;CustomLog
+
+# ### begin forwarding rule ###
+# The statements between the begin ... end markers define a SINGLE
+# forwarding rule. They belong together; do NOT split them. If you create
+# multiple forwarding rules, duplicate the whole block!
+# Remote Logging (we use TCP for reliable delivery)
+#
+# An on-disk queue is created for this action. If the remote host is
+# down, messages are spooled to disk and sent when it is up again.
+#$WorkDirectory /var/lib/rsyslog # where to place spool files
+#$ActionQueueFileName fwdRule1 # unique name prefix for spool files
+#$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible)
+#$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
+#$ActionQueueType LinkedList # run asynchronously
+#$ActionResumeRetryCount -1 # infinite retries if host is down
+# remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
+#*.* @@remote-host:514
+# ### end of the forwarding rule ###
diff --git a/compass-deck/misc/snmp/snmp.conf b/compass-deck/misc/snmp/snmp.conf
new file mode 100644
index 0000000..34f8053
--- /dev/null
+++ b/compass-deck/misc/snmp/snmp.conf
@@ -0,0 +1 @@
+mibdirs +/usr/local/share/snmp/mibs
diff --git a/compass-deck/misc/squid/squid.conf b/compass-deck/misc/squid/squid.conf
new file mode 100644
index 0000000..018da20
--- /dev/null
+++ b/compass-deck/misc/squid/squid.conf
@@ -0,0 +1,71 @@
+#
+# Recommended minimum configuration:
+#
+acl manager proto cache_object
+acl localhost src 127.0.0.1/32 ::1
+acl to_localhost dst 127.0.0.0/8 0.0.0.0/32 ::1
+
+# Example rule allowing access from your local networks.
+# Adapt to list your (internal) IP networks from where browsing
+# should be allowed
+acl localnet src $subnet # the subnet of local network
+acl localnet src fc00::/7 # RFC 4193 local private network range
+acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
+
+acl CONNECT method CONNECT
+
+#
+# Recommended minimum Access Permission configuration:
+#
+# Only allow cachemgr access from localhost
+http_access allow manager localhost
+http_access deny manager
+
+# We strongly recommend the following be uncommented to protect innocent
+# web applications running on the proxy server who think the only
+# one who can access services on "localhost" is a local user
+http_access deny to_localhost
+
+#
+# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS
+#
+
+# Example rule allowing access from your local networks.
+# Adapt localnet in the ACL section to list your (internal) IP networks
+# from where browsing should be allowed
+http_access allow localnet
+http_access allow localhost
+
+# And finally deny all other access to this proxy
+http_access deny all
+
+# Squid normally listens to port 3128
+http_port 3128
+
+# We recommend you use at least the following line.
+# hierarchy_stoplist cgi-bin ?
+
+cache_mem 512 MB
+maximum_object_size_in_memory 512 KB
+maximum_object_size 512 MB
+# Uncomment and adjust the following to add a disk cache directory.
+cache_dir aufs /var/squid/cache 25000 16 256
+
+cache_store_log /var/log/squid/store.log
+
+access_log none
+
+# Leave coredumps in the first cache dir
+coredump_dir /var/spool/squid
+
+# Add any of your own refresh_pattern entries above these.
+refresh_pattern . 86400 50% 518400
+
+quick_abort_min -1 KB
+read_ahead_gap 100 MB
+
+positive_dns_ttl 30 second
+negative_dns_ttl 1 second
+
+pipeline_prefetch on
+request_timeout 15 minute
diff --git a/compass-deck/requirements.txt b/compass-deck/requirements.txt
new file mode 100644
index 0000000..6a3b3c7
--- /dev/null
+++ b/compass-deck/requirements.txt
@@ -0,0 +1,24 @@
+amqplib
+argparse
+celery
+Markdown<2.5
+Cheetah<=2.4.1
+daemon
+Flask
+Flask-Login<=0.3.2
+Flask-RESTful
+Flask-Script
+Flask-SQLAlchemy
+Flask-WTF
+itsdangerous
+importlib
+lazypy
+lockfile
+netaddr
+MySQL-python
+paramiko
+PyChef
+python-daemon==2.1.1
+SQLAlchemy>=0.9.0
+simplejson
+requests
diff --git a/compass-deck/setup.py b/compass-deck/setup.py
new file mode 100644
index 0000000..1907d3d
--- /dev/null
+++ b/compass-deck/setup.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""setup script."""
+try:
+ from setuptools import find_packages
+except ImportError:
+ from ez_setup import use_setuptools
+ use_setuptools()
+
+
+from setuptools.command.test import test as TestCommand
+from setuptools import setup
+
+
+import os
+import sys
+
+
+# This helps the `python setup.py test` command utilize tox.
+# See the instruction at https://testrun.org/tox/latest/example/basic.html\
+# #integration-with-setuptools-distribute-test-commands
+
+class Tox(TestCommand):
+ """Tox to do the setup."""
+
+ def finalize_options(self):
+ TestCommand.finalize_options(self)
+ self.test_args = []
+ self.test_suite = True
+
+ def run_tests(self):
+ import tox
+ errno = tox.cmdline(self.test_args)
+ sys.exit(errno)
+
+
+INSTALL_REQUIRES_FILE = os.path.join(
+ os.path.dirname(__file__), 'requirements.txt')
+with open(INSTALL_REQUIRES_FILE, 'r') as requires_file:
+ REQUIREMENTS = [line.strip() for line in requires_file if line != '\n']
+
+DATA_FILES_DIR = os.path.join(
+ os.path.dirname(__file__), 'conf')
+DATA_FILES = []
+for parent_dir, sub_dirs, files in os.walk(DATA_FILES_DIR):
+    for file in files:
+        DATA_FILES.append((parent_dir, [os.path.join(parent_dir, file)]))
+
+setup(
+ name='compass-deck',
+ version='0.1.0',
+
+ # general info
+    description="""compass-deck: API of the automation framework for
+    system deployment on bare metal""",
+ author='Compass Development Group',
+ author_email='dev@syscompass.org',
+ url='https://github.com/openstack/compass-core',
+ download_url='',
+
+ # dependency
+ install_requires=REQUIREMENTS,
+ packages=find_packages(exclude=['compass.tests']),
+ include_package_data=True,
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Information Technology',
+ 'Intended Audience :: System Administrators',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: POSIX :: Linux',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ ],
+ # data
+ data_files=DATA_FILES,
+ # test,
+ tests_require=['tox'],
+ cmdclass={'test': Tox},
+)
diff --git a/compass-deck/start.sh b/compass-deck/start.sh
new file mode 100755
index 0000000..e4f2b73
--- /dev/null
+++ b/compass-deck/start.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+until mysql -h"compass-db" -u"root" -p"root" -e 'show databases'; do
+ >&2 echo "DB is unavailable - sleeping"
+ sleep 1
+done
+>&2 echo "DB is up"
+systemctl start httpd
diff --git a/compass-deck/tasks/__init__.py b/compass-deck/tasks/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-deck/tasks/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/tasks/client.py b/compass-deck/tasks/client.py
new file mode 100644
index 0000000..ca7ad14
--- /dev/null
+++ b/compass-deck/tasks/client.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to setup celery client.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+
+    .. note::
+        If CELERY_CONFIG_MODULE is set in the environment, load the celery
+        config from the module named by CELERY_CONFIG_MODULE.
+"""
+import os
+
+from celery import Celery
+
+
+celery = Celery(__name__)
+if 'CELERY_CONFIG_MODULE' in os.environ:
+ celery.config_from_envvar('CELERY_CONFIG_MODULE')
+else:
+ from compass.utils import celeryconfig_wrapper as celeryconfig
+ celery.config_from_object(celeryconfig)
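A minimal usage sketch of the environment override above (the config module
name here is hypothetical, and must be importable on the worker):

    import os

    # set before importing the client module, which reads it at import time
    os.environ['CELERY_CONFIG_MODULE'] = 'mycompany.celeryconfig'

    from compass.tasks.client import celery
    print(celery.conf.get('CELERY_IMPORTS'))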
diff --git a/compass-deck/tasks/tasks.py b/compass-deck/tasks/tasks.py
new file mode 100644
index 0000000..f649afd
--- /dev/null
+++ b/compass-deck/tasks/tasks.py
@@ -0,0 +1,326 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to define celery tasks.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from celery.signals import celeryd_init
+from celery.signals import setup_logging
+
+from compass.actions import clean
+from compass.actions import delete
+from compass.actions import deploy
+from compass.actions import install_callback
+from compass.actions import patch
+from compass.actions import poll_switch
+from compass.actions import update_progress
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+from compass.log_analyzor import progress_calculator
+
+from compass.tasks.client import celery
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+@celeryd_init.connect()
+def global_celery_init(**_):
+ """Initialization code."""
+ flags.init()
+ flags.OPTIONS.logfile = setting.CELERY_LOGFILE
+ logsetting.init()
+ database.init()
+ adapter_api.load_adapters()
+ metadata_api.load_metadatas()
+ adapter_api.load_flavors()
+ progress_calculator.load_calculator_configurations()
+
+
+@setup_logging.connect()
+def tasks_setup_logging(**_):
+ """Setup logging options from compass setting."""
+ flags.init()
+ flags.OPTIONS.logfile = setting.CELERY_LOGFILE
+ logsetting.init()
+
+
+@celery.task(name='compass.tasks.pollswitch')
+def pollswitch(
+ poller_email, ip_addr, credentials,
+ req_obj='mac', oper='SCAN'
+):
+ """Query switch and return expected result.
+
+ :param ip_addr: switch ip address.
+ :type ip_addr: str
+ :param credentials: switch credentials
+ :type credentials: dict
+ :param reqObj: the object requested to query from switch.
+ :type reqObj: str
+ :param oper: the operation to query the switch (SCAN, GET, SET).
+ :type oper: str
+ """
+ try:
+ poll_switch.poll_switch(
+ poller_email, ip_addr, credentials,
+ req_obj=req_obj, oper=oper
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.cluster_health')
+def health_check(cluster_id, send_report_url, useremail):
+ """Verify the deployed cluster functionally works.
+
+ :param cluster_id: ID of the cluster
+ :param send_report_url: The URL which reports should send back
+ """
+ try:
+ deploy.health_check(cluster_id, send_report_url, useremail)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.deploy_cluster')
+def deploy_cluster(deployer_email, cluster_id, clusterhost_ids):
+ """Deploy the given cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ :param clusterhost_ids: the id of the hosts in the cluster
+ :type clusterhost_ids: list of int
+ """
+ try:
+ deploy.deploy(cluster_id, clusterhost_ids, deployer_email)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.redeploy_cluster')
+def redeploy_cluster(deployer_email, cluster_id):
+ """Redeploy the given cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ """
+ try:
+ deploy.redeploy(cluster_id, deployer_email)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.patch_cluster')
+def patch_cluster(patcher_email, cluster_id):
+ """Patch the existing cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ """
+ try:
+ patch.patch(cluster_id, patcher_email)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.reinstall_cluster')
+def reinstall_cluster(installer_email, cluster_id, clusterhost_ids):
+ """reinstall the given cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ :param clusterhost_ids: the id of the hosts in the cluster
+ :type clusterhost_ids: list of int
+ """
+ try:
+ deploy.redeploy(cluster_id, clusterhost_ids, installer_email)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.delete_cluster')
+def delete_cluster(
+ deleter_email, cluster_id, clusterhost_ids,
+ delete_underlying_host=False
+):
+ """Delete the given cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ :param clusterhost_ids: the id of the hosts in the cluster
+ :type clusterhost_ids: list of int
+ """
+ try:
+ delete.delete_cluster(
+ cluster_id, clusterhost_ids, deleter_email,
+ delete_underlying_host=delete_underlying_host
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.delete_cluster_host')
+def delete_cluster_host(
+ deleter_email, cluster_id, host_id,
+ delete_underlying_host=False
+):
+ """Delte the given cluster host.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ :param host_id: id of the host
+ :type host_id: int
+ """
+ try:
+ delete.delete_cluster_host(
+ cluster_id, host_id, deleter_email,
+ delete_underlying_host=delete_underlying_host
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.delete_host')
+def delete_host(deleter_email, host_id, cluster_ids):
+ """Delete the given host.
+
+ :param host_id: id of the host
+ :type host_id: int
+ :param cluster_ids: list of cluster id
+ :type cluster_ids: list of int
+ """
+ try:
+ delete.delete_host(
+ host_id, cluster_ids, deleter_email
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.clean_os_installer')
+def clean_os_installer(
+ os_installer_name, os_installer_settings
+):
+ """Clean os installer."""
+ try:
+ clean.clean_os_installer(
+ os_installer_name, os_installer_settings
+ )
+ except Exception as error:
+        logging.exception(error)
+
+
+@celery.task(name='compass.tasks.clean_package_installer')
+def clean_package_installer(
+ package_installer_name, package_installer_settings
+):
+ """Clean package installer."""
+ try:
+ clean.clean_package_installer(
+ package_installer_name, package_installer_settings
+ )
+ except Exception as error:
+        logging.exception(error)
+
+
+@celery.task(name='compass.tasks.poweron_host')
+def poweron_host(host_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.poweroff_host')
+def poweroff_host(host_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.reset_host')
+def reset_host(host_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.poweron_machine')
+def poweron_machine(machine_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.poweroff_machine')
+def poweroff_machine(machine_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.reset_machine')
+def reset_machine(machine_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.os_installed')
+def os_installed(
+ host_id, clusterhosts_ready,
+ clusters_os_ready
+):
+ """callback when os is installed."""
+ try:
+ install_callback.os_installed(
+ host_id, clusterhosts_ready,
+ clusters_os_ready
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.package_installed')
+def package_installed(
+ cluster_id, host_id, cluster_ready, host_ready
+):
+ """callback when package is installed."""
+ try:
+ install_callback.package_installed(
+ cluster_id, host_id, cluster_ready, host_ready
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.cluster_installed')
+def cluster_installed(
+ cluster_id, clusterhosts_ready
+):
+ """callback when package is installed."""
+ try:
+ install_callback.cluster_installed(
+ cluster_id, clusterhosts_ready
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.update_progress')
+def update_clusters_progress():
+ """Calculate the installing progress of the given cluster."""
+ logging.info('update_clusters_progress')
+ try:
+ update_progress.update_progress()
+ except Exception as error:
+ logging.exception(error)
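Because every task above is registered under an explicit name, callers can
enqueue work by name without importing this module. A minimal dispatch
sketch, assuming a reachable broker; the argument values are placeholders:

    from compass.tasks.client import celery

    # queue a switch poll for any worker subscribed to the same broker
    celery.send_task(
        'compass.tasks.pollswitch',
        ('admin@example.com', '10.145.8.10',
         {'version': '2c', 'community': 'public'})
    )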
diff --git a/compass-deck/utils/__init__.py b/compass-deck/utils/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-deck/utils/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/utils/celeryconfig_wrapper.py b/compass-deck/utils/celeryconfig_wrapper.py
new file mode 100644
index 0000000..b6644ba
--- /dev/null
+++ b/compass-deck/utils/celeryconfig_wrapper.py
@@ -0,0 +1,44 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""celeryconfig wrapper.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+import os.path
+import urllib
+
+from compass.utils import setting_wrapper as setting
+
+
+# CELERY_RESULT_BACKEND = 'amqp://'
+
+# BROKER_URL = 'amqp://guest:guest@localhost:5672//'
+
+
+CELERY_IMPORTS = ('compass.tasks.tasks',)
+
+
+if setting.CELERYCONFIG_FILE:
+ CELERY_CONFIG = os.path.join(
+ str(setting.CELERYCONFIG_DIR),
+ str(setting.CELERYCONFIG_FILE))
+
+ try:
+ logging.info('load celery config from %s', CELERY_CONFIG)
+ execfile(CELERY_CONFIG, globals(), locals())
+ except Exception as error:
+ logging.exception(error)
+ raise error
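A sketch of what the file named by CELERYCONFIG_DIR/CELERYCONFIG_FILE might
contain; it is execfile()'d into this module, so it is plain Python. The
broker URL and concurrency below are assumptions, not project defaults:

    # hypothetical /etc/compass/celeryconfig
    BROKER_URL = 'amqp://guest:guest@localhost:5672//'
    CELERY_RESULT_BACKEND = 'amqp://'
    CELERYD_CONCURRENCY = 4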
diff --git a/compass-deck/utils/daemonize.py b/compass-deck/utils/daemonize.py
new file mode 100644
index 0000000..f02bfb9
--- /dev/null
+++ b/compass-deck/utils/daemonize.py
@@ -0,0 +1,76 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provider util functions in all compass code
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import daemon
+import logging
+import signal
+import sys
+import time
+
+from compass.utils import flags
+
+
+flags.add_bool('daemonize',
+ help='run as daemon',
+ default=False)
+
+
+BUSY = False
+KILLED = False
+
+
+def handle_term(signum, frame):
+ """Handle sig term."""
+ global KILLED
+ logging.info('Caught signal %s in %s', signum, frame)
+ KILLED = True
+ if not BUSY:
+ sys.exit(0)
+
+
+def _daemon(callback, run_interval):
+ """help function to run callback in daemon."""
+ global BUSY
+ signal.signal(signal.SIGTERM, handle_term)
+ signal.signal(signal.SIGHUP, handle_term)
+
+ while True:
+ BUSY = True
+ callback()
+ BUSY = False
+ if KILLED:
+ logging.info('exit loop')
+ break
+
+        if run_interval > 0:
+            logging.info('will rerun after %s seconds', run_interval)
+            time.sleep(run_interval)
+ else:
+ logging.info('finish loop')
+ break
+
+
+def daemonize(callback, run_interval, **kwargs):
+ """daemonize callback and run every run_interval seconds."""
+ if flags.OPTIONS.daemonize:
+ with daemon.DaemonContext(**kwargs):
+ logging.info('run as daemon')
+ _daemon(callback, run_interval)
+ else:
+ _daemon(callback, run_interval)
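A minimal usage sketch, assuming the module is importable as
compass.utils.daemonize; the callback is a placeholder:

    from compass.utils import daemonize
    from compass.utils import flags

    def heartbeat():
        print('tick')  # placeholder workload

    flags.init()                        # pass --daemonize to detach
    daemonize.daemonize(heartbeat, 30)  # rerun every 30s until SIGTERM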
diff --git a/compass-deck/utils/flags.py b/compass-deck/utils/flags.py
new file mode 100644
index 0000000..a3169f5
--- /dev/null
+++ b/compass-deck/utils/flags.py
@@ -0,0 +1,91 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to load flags.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import sys
+
+from optparse import OptionParser
+
+
+class Flags(object):
+ """Class to store flags."""
+
+ PARSER = OptionParser()
+ PARSED_OPTIONS = None
+
+ @classmethod
+ def parse_args(cls):
+ """parse args."""
+ (options, argv) = Flags.PARSER.parse_args()
+ sys.argv = [sys.argv[0]] + argv
+ Flags.PARSED_OPTIONS = options
+
+ def __getattr__(self, name):
+ if Flags.PARSED_OPTIONS and hasattr(Flags.PARSED_OPTIONS, name):
+ return getattr(Flags.PARSED_OPTIONS, name)
+
+ for option in Flags.PARSER.option_list:
+ if option.dest == name:
+ return option.default
+
+        raise AttributeError('Flags instance has no attribute %s' % name)
+
+ def __setattr__(self, name, value):
+ if Flags.PARSED_OPTIONS and hasattr(Flags.PARSED_OPTIONS, name):
+ setattr(Flags.PARSED_OPTIONS, name, value)
+ return
+
+ for option in Flags.PARSER.option_list:
+ if option.dest == name:
+ option.default = value
+ return
+
+ object.__setattr__(self, name, value)
+
+
+OPTIONS = Flags()
+
+
+def init():
+ """Init flag parsing."""
+ OPTIONS.parse_args()
+
+
+def add(flagname, **kwargs):
+ """Add a flag name and its setting.
+
+ :param flagname: flag name declared in cmd as --<flagname>=...
+ :type flagname: str
+ """
+ Flags.PARSER.add_option('--%s' % flagname,
+ dest=flagname, **kwargs)
+
+
+def add_bool(flagname, default=True, **kwargs):
+ """Add a bool flag name and its setting.
+
+ :param flagname: flag name declared in cmd as --[no]<flagname>.
+ :type flagname: str
+ :param default: default value
+ :type default: bool
+ """
+ Flags.PARSER.add_option('--%s' % flagname,
+ dest=flagname, default=default,
+ action="store_true", **kwargs)
+ Flags.PARSER.add_option('--no%s' % flagname,
+ dest=flagname,
+ action="store_false", **kwargs)
diff --git a/compass-deck/utils/logsetting.py b/compass-deck/utils/logsetting.py
new file mode 100644
index 0000000..836ebcb
--- /dev/null
+++ b/compass-deck/utils/logsetting.py
@@ -0,0 +1,108 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to setup logging configuration.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+
+import logging
+import logging.handlers
+import os
+import os.path
+import sys
+
+from compass.utils import flags
+from compass.utils import setting_wrapper as setting
+
+
+flags.add('loglevel',
+ help='logging level', default=setting.DEFAULT_LOGLEVEL)
+flags.add('logdir',
+ help='logging directory', default=setting.DEFAULT_LOGDIR)
+flags.add('logfile',
+ help='logging filename', default=None)
+flags.add('log_interval', type='int',
+ help='log interval', default=setting.DEFAULT_LOGINTERVAL)
+flags.add('log_interval_unit',
+ help='log interval unit', default=setting.DEFAULT_LOGINTERVAL_UNIT)
+flags.add('log_format',
+ help='log format', default=setting.DEFAULT_LOGFORMAT)
+flags.add('log_backup_count', type='int',
+ help='log backup count', default=setting.DEFAULT_LOGBACKUPCOUNT)
+
+
+# mapping str setting in flag --loglevel to logging level.
+LOGLEVEL_MAPPING = {
+ 'finest': logging.DEBUG - 2, # more detailed log.
+ 'fine': logging.DEBUG - 1, # detailed log.
+ 'debug': logging.DEBUG,
+ 'info': logging.INFO,
+ 'warning': logging.WARNING,
+ 'error': logging.ERROR,
+ 'critical': logging.CRITICAL,
+}
+
+
+logging.addLevelName(LOGLEVEL_MAPPING['fine'], 'fine')
+logging.addLevelName(LOGLEVEL_MAPPING['finest'], 'finest')
+
+
+# disable logging when logsetting.init not called
+logging.getLogger().setLevel(logging.CRITICAL)
+
+
+def getLevelByName(level_name):
+ """Get log level by level name."""
+ return LOGLEVEL_MAPPING[level_name]
+
+
+def init():
+ """Init loggsetting. It should be called after flags.init."""
+ loglevel = flags.OPTIONS.loglevel.lower()
+ logdir = flags.OPTIONS.logdir
+ logfile = flags.OPTIONS.logfile
+ logger = logging.getLogger()
+ if logger.handlers:
+ for handler in logger.handlers:
+ logger.removeHandler(handler)
+
+ if logdir:
+ if not logfile:
+ logfile = '%s.log' % os.path.basename(sys.argv[0])
+
+ handler = logging.handlers.TimedRotatingFileHandler(
+ os.path.join(logdir, logfile),
+ when=flags.OPTIONS.log_interval_unit,
+ interval=flags.OPTIONS.log_interval,
+ backupCount=flags.OPTIONS.log_backup_count)
+ else:
+ if not logfile:
+ handler = logging.StreamHandler(sys.stderr)
+ else:
+ handler = logging.handlers.TimedRotatingFileHandler(
+ logfile,
+ when=flags.OPTIONS.log_interval_unit,
+ interval=flags.OPTIONS.log_interval,
+ backupCount=flags.OPTIONS.log_backup_count)
+
+ if loglevel in LOGLEVEL_MAPPING:
+ logger.setLevel(LOGLEVEL_MAPPING[loglevel])
+ handler.setLevel(LOGLEVEL_MAPPING[loglevel])
+
+ formatter = logging.Formatter(
+ flags.OPTIONS.log_format)
+
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
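A minimal setup sketch: flags must be parsed before logging is configured,
and a logdir of None sends output to stderr:

    import logging

    from compass.utils import flags
    from compass.utils import logsetting

    flags.init()
    flags.OPTIONS.logdir = None
    flags.OPTIONS.loglevel = 'debug'
    logsetting.init()
    logging.info('logging configured')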
diff --git a/compass-deck/utils/setting_wrapper.py b/compass-deck/utils/setting_wrapper.py
new file mode 100644
index 0000000..0b3e9f7
--- /dev/null
+++ b/compass-deck/utils/setting_wrapper.py
@@ -0,0 +1,175 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""comapss setting wrapper.
+
+ .. moduleauthor:: Xiaodong Wang ,xiaodongwang@huawei.com>
+"""
+import datetime
+import lazypy
+import logging
+import os
+import os.path
+
+
+# default setting
+CONFIG_DIR = os.environ.get('COMPASS_CONFIG_DIR', '/etc/compass')
+SQLALCHEMY_DATABASE_URI = 'sqlite://'
+SQLALCHEMY_DATABASE_POOL_TYPE = 'static'
+COBBLER_INSTALLATION_LOGDIR = '/var/log/cobbler/anamon'
+CHEF_INSTALLATION_LOGDIR = '/var/log/chef'
+INSTALLATION_LOGDIR = {
+ 'CobblerInstaller': COBBLER_INSTALLATION_LOGDIR,
+ 'ChefInstaller': CHEF_INSTALLATION_LOGDIR
+}
+CLUSTERHOST_INATALLATION_LOGDIR_NAME = 'name'
+HOST_INSTALLATION_LOGDIR_NAME = 'name'
+DEFAULT_LOGLEVEL = 'debug'
+DEFAULT_LOGDIR = '/tmp'
+DEFAULT_LOGINTERVAL = 1
+DEFAULT_LOGINTERVAL_UNIT = 'h'
+DEFAULT_LOGFORMAT = (
+ '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s')
+DEFAULT_LOGBACKUPCOUNT = 5
+WEB_LOGFILE = ''
+CELERY_LOGFILE = ''
+CELERYCONFIG_DIR = lazypy.delay(lambda: CONFIG_DIR)
+CELERYCONFIG_FILE = ''
+PROGRESS_UPDATE_INTERVAL = 30
+POLLSWITCH_INTERVAL = 60
+SWITCHES = [
+]
+
+USER_AUTH_HEADER_NAME = 'X-Auth-Token'
+USER_TOKEN_DURATION = '2h'
+COMPASS_ADMIN_EMAIL = 'admin@huawei.com'
+COMPASS_ADMIN_PASSWORD = 'admin'
+COMPASS_DEFAULT_PERMISSIONS = [
+ 'list_permissions',
+]
+SWITCHES_DEFAULT_FILTERS = []
+DEFAULT_SWITCH_IP = '0.0.0.0'
+DEFAULT_SWITCH_PORT = 0
+
+COMPASS_SUPPORTED_PROXY = 'http://127.0.0.1:3128'
+COMPASS_SUPPORTED_DEFAULT_NOPROXY = ['127.0.0.1']
+COMPASS_SUPPORTED_NTP_SERVER = '127.0.0.1'
+COMPASS_SUPPORTED_DNS_SERVERS = ['127.0.0.1']
+COMPASS_SUPPORTED_DOMAINS = []
+COMPASS_SUPPORTED_DEFAULT_GATEWAY = '127.0.0.1'
+COMPASS_SUPPORTED_LOCAL_REPO = 'http://127.0.0.1'
+
+PROGRESS_UPDATE_PID_FILE = '/var/run/progress_update.pid'
+
+PROXY_URL_PREFIX = 'http://10.145.81.205:5000'
+
+OS_INSTALLER_DIR = ''
+PACKAGE_INSTALLER_DIR = ''
+OS_DIR = ''
+ADAPTER_DIR = ''
+OS_METADATA_DIR = ''
+PACKAGE_METADATA_DIR = ''
+FLAVOR_METADATA_DIR = ''
+OS_FIELD_DIR = ''
+PACKAGE_FIELD_DIR = ''
+FLAVOR_FIELD_DIR = ''
+ADAPTER_ROLE_DIR = ''
+ADAPTER_FLAVOR_DIR = ''
+VALIDATOR_DIR = ''
+CALLBACK_DIR = ''
+TMPL_DIR = ''
+MACHINE_LIST_DIR = ''
+PROGRESS_CALCULATOR_DIR = ''
+OS_MAPPING_DIR = ''
+FLAVOR_MAPPING_DIR = ''
+PLUGINS_DIR = ''
+
+if not os.environ.get('COMPASS_IGNORE_SETTING'):
+ if 'COMPASS_SETTING' in os.environ:
+ SETTING = os.environ['COMPASS_SETTING']
+ else:
+ SETTING = '/etc/compass/setting'
+
+ try:
+ logging.info('load setting from %s', SETTING)
+ execfile(SETTING, globals(), locals())
+ except Exception as error:
+ logging.exception(error)
+ raise error
+
+if not OS_INSTALLER_DIR:
+ OS_INSTALLER_DIR = os.path.join(CONFIG_DIR, 'os_installer')
+
+if not PACKAGE_INSTALLER_DIR:
+ PACKAGE_INSTALLER_DIR = os.path.join(CONFIG_DIR, 'package_installer')
+
+if not OS_DIR:
+ OS_DIR = os.path.join(CONFIG_DIR, 'os')
+
+if not ADAPTER_DIR:
+ ADAPTER_DIR = os.path.join(CONFIG_DIR, 'adapter')
+
+if not OS_METADATA_DIR:
+ OS_METADATA_DIR = os.path.join(CONFIG_DIR, 'os_metadata')
+
+if not PACKAGE_METADATA_DIR:
+ PACKAGE_METADATA_DIR = os.path.join(CONFIG_DIR, 'package_metadata')
+
+if not FLAVOR_METADATA_DIR:
+ FLAVOR_METADATA_DIR = os.path.join(CONFIG_DIR, 'flavor_metadata')
+
+if not OS_FIELD_DIR:
+ OS_FIELD_DIR = os.path.join(CONFIG_DIR, 'os_field')
+
+if not PACKAGE_FIELD_DIR:
+ PACKAGE_FIELD_DIR = os.path.join(CONFIG_DIR, 'package_field')
+
+if not FLAVOR_FIELD_DIR:
+ FLAVOR_FIELD_DIR = os.path.join(CONFIG_DIR, 'flavor_field')
+
+if not ADAPTER_ROLE_DIR:
+ ADAPTER_ROLE_DIR = os.path.join(CONFIG_DIR, 'role')
+
+if not ADAPTER_FLAVOR_DIR:
+ ADAPTER_FLAVOR_DIR = os.path.join(CONFIG_DIR, 'flavor')
+
+if not VALIDATOR_DIR:
+ VALIDATOR_DIR = os.path.join(CONFIG_DIR, 'validator')
+
+if not CALLBACK_DIR:
+ CALLBACK_DIR = os.path.join(CONFIG_DIR, 'callback')
+
+if not TMPL_DIR:
+ TMPL_DIR = os.path.join(CONFIG_DIR, 'templates')
+
+if not MACHINE_LIST_DIR:
+ MACHINE_LIST_DIR = os.path.join(CONFIG_DIR, 'machine_list')
+
+if not PROGRESS_CALCULATOR_DIR:
+ PROGRESS_CALCULATOR_DIR = os.path.join(CONFIG_DIR, 'progress_calculator')
+
+if not OS_MAPPING_DIR:
+ OS_MAPPING_DIR = os.path.join(CONFIG_DIR, 'os_mapping')
+
+if not FLAVOR_MAPPING_DIR:
+ FLAVOR_MAPPING_DIR = os.path.join(CONFIG_DIR, 'flavor_mapping')
+
+if not PLUGINS_DIR:
+ PLUGINS_DIR = os.environ.get('COMPASS_PLUGINS_DIR',
+ os.path.join(CONFIG_DIR, 'plugins'))
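The defaults above can be overridden by the file named in COMPASS_SETTING
(default /etc/compass/setting), which is execfile()'d into this module. A
sketch with assumed values, not project defaults:

    # hypothetical /etc/compass/setting
    CONFIG_DIR = '/etc/compass'
    SQLALCHEMY_DATABASE_URI = 'mysql://root:root@compass-db/compass'
    DEFAULT_LOGLEVEL = 'info'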
diff --git a/compass-deck/utils/util.py b/compass-deck/utils/util.py
new file mode 100644
index 0000000..39978ca
--- /dev/null
+++ b/compass-deck/utils/util.py
@@ -0,0 +1,395 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provider util functions in all compass code
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+
+import crypt
+import datetime
+import logging
+import os
+import os.path
+import re
+from compass.utils import setting_wrapper as setting
+import sys
+import warnings
+
+
+def deprecated(func):
+ """This is a decorator which can be used to mark functions as deprecated.
+
+ It will result in a warning being emitted when the function is used.
+ """
+ def new_func(*args, **kwargs):
+ warnings.warn(
+ "Call to deprecated function %s." % func.__name__,
+ category=DeprecationWarning
+ )
+ return func(*args, **kwargs)
+
+ new_func.__name__ = func.__name__
+ new_func.__doc__ = func.__doc__
+ new_func.__dict__.update(func.__dict__)
+ return new_func
+
+
+def parse_datetime(date_time, exception_class=Exception):
+ """Parse datetime str to get datetime object.
+
+ The date time format is %Y-%m-%d %H:%M:%S
+ """
+ try:
+ return datetime.datetime.strptime(
+ date_time, '%Y-%m-%d %H:%M:%S'
+ )
+ except Exception as error:
+ logging.exception(error)
+ raise exception_class(
+ 'date time %s format is invalid' % date_time
+ )
+
+
+def parse_datetime_range(date_time_range, exception_class=Exception):
+ """parse datetime range str to pair of datetime objects.
+
+ The date time range format is %Y-%m-%d %H:%M:%S,%Y-%m-%d %H:%M:%S
+ """
+ try:
+ start, end = date_time_range.split(',')
+ except Exception as error:
+ logging.exception(error)
+ raise exception_class(
+ 'there is no `,` in date time range %s' % date_time_range
+ )
+ if start:
+ start_datetime = parse_datetime(start, exception_class)
+ else:
+ start_datetime = None
+ if end:
+ end_datetime = parse_datetime(end, exception_class)
+ else:
+ end_datetime = None
+ return start_datetime, end_datetime
+
+
+def parse_request_arg_dict(arg, exception_class=Exception):
+ """parse string to dict.
+
+ The str is formatted like a=b;c=d and parsed to
+ {'a': 'b', 'c': 'd'}
+ """
+ arg_dict = {}
+ arg_pairs = arg.split(';')
+ for arg_pair in arg_pairs:
+ try:
+ arg_name, arg_value = arg_pair.split('=', 1)
+ except Exception as error:
+ logging.exception(error)
+ raise exception_class(
+ 'there is no `=` in %s' % arg_pair
+ )
+ arg_dict[arg_name] = arg_value
+ return arg_dict
+
+
+def format_datetime(date_time):
+ """Generate string from datetime object."""
+ return date_time.strftime("%Y-%m-%d %H:%M:%S")
+
+
+def merge_dict(lhs, rhs, override=True):
+ """Merge nested right dict into left nested dict recursively.
+
+ :param lhs: dict to be merged into.
+ :type lhs: dict
+ :param rhs: dict to merge from.
+ :type rhs: dict
+    :param override: the value in rhs overrides the value in lhs if True.
+ :type override: boolean
+ """
+ if not isinstance(lhs, dict) or not isinstance(rhs, dict):
+ if override:
+ return rhs
+ else:
+ return lhs
+
+ for key, value in rhs.items():
+ if key not in lhs:
+ lhs[key] = rhs[key]
+ else:
+ lhs[key] = merge_dict(lhs[key], value, override)
+
+ return lhs
+
+
+def recursive_merge_dict(name, all_dicts, parents):
+ """Recursively merge parent dict into base dict."""
+ parent_name = parents.get(name, None)
+ base_dict = all_dicts.get(name, {})
+ if not parent_name:
+ return base_dict
+ merged = recursive_merge_dict(parent_name, all_dicts, parents)
+ return merge_dict(base_dict, merged, override=False)
+
+
+def encrypt(value, crypt_method=None):
+ """Get encrypted value."""
+ if not crypt_method:
+ if hasattr(crypt, 'METHOD_MD5'):
+ crypt_method = crypt.METHOD_MD5
+ else:
+            # crypt.METHOD_MD5 is unavailable before python 3.3;
+            # emulate an md5 salt here.
+ from random import choice
+ import string
+
+ _saltchars = string.ascii_letters + string.digits + './'
+
+ def _mksalt():
+ """generate salt."""
+ salt = '$1$'
+ salt += ''.join(choice(_saltchars) for _ in range(8))
+ return salt
+
+ crypt_method = _mksalt()
+
+ return crypt.crypt(value, crypt_method)
+
+
+def parse_time_interval(time_interval_str):
+ """parse string of time interval to time interval.
+
+ supported time interval unit: ['d', 'w', 'h', 'm', 's']
+ Examples:
+ time_interval_str: '3d 2h' time interval to 3 days and 2 hours.
+ """
+ if not time_interval_str:
+ return 0
+
+ time_interval_tuple = [
+ time_interval_element
+ for time_interval_element in time_interval_str.split(' ')
+ if time_interval_element
+ ]
+ time_interval_dict = {}
+ time_interval_unit_mapping = {
+ 'd': 'days',
+ 'w': 'weeks',
+ 'h': 'hours',
+ 'm': 'minutes',
+ 's': 'seconds'
+ }
+ for time_interval_element in time_interval_tuple:
+ mat = re.match(r'^([+-]?\d+)(w|d|h|m|s).*', time_interval_element)
+ if not mat:
+ continue
+
+ time_interval_value = int(mat.group(1))
+ time_interval_unit = time_interval_unit_mapping[mat.group(2)]
+ time_interval_dict[time_interval_unit] = (
+ time_interval_dict.get(time_interval_unit, 0) + time_interval_value
+ )
+
+ time_interval = datetime.timedelta(**time_interval_dict)
+ if sys.version_info[0:2] > (2, 6):
+ return time_interval.total_seconds()
+ else:
+ return (
+ time_interval.microseconds + (
+ time_interval.seconds + time_interval.days * 24 * 3600
+ ) * 1e6
+ ) / 1e6
+
+
+def get_plugins_config_files(name, suffix=".conf"):
+ """walk through each of plugin to find all the config files in the"""
+ """name directory"""
+
+ plugins_path = setting.PLUGINS_DIR
+ files = []
+ if os.path.exists(plugins_path):
+ for plugin in os.listdir(plugins_path):
+ plugin_path = os.path.join(plugins_path, plugin)
+ plugin_config = os.path.join(plugin_path, name)
+ if os.path.exists(plugin_config):
+ for component in os.listdir(plugin_config):
+ if not component.endswith(suffix):
+ continue
+ files.append(os.path.join(plugin_config, component))
+ return files
+
+
+def load_configs(
+ config_dir, config_name_suffix='.conf',
+ env_globals={}, env_locals={}
+):
+ """Load configurations from config dir."""
+ """The config file could be in the config_dir or in plugins config_dir"""
+ """The plugins config_dir is formed as, for example /etc/compass/adapter"""
+ """Then the plugins config_dir is /etc/compass/plugins/xxx/adapter"""
+
+ # TODO(Carl) instead of using config_dir, it should use a name such as
+ # adapter etc, however, doing it requires a lot client sites changes,
+ # will do it later.
+
+ configs = []
+ config_files = []
+ config_dir = str(config_dir)
+
+ """search for config_dir"""
+ if os.path.exists(config_dir):
+ for component in os.listdir(config_dir):
+ if not component.endswith(config_name_suffix):
+ continue
+ config_files.append(os.path.join(config_dir, component))
+
+ """search for plugins config_dir"""
+ index = config_dir.rfind("/")
+
+ config_files.extend(get_plugins_config_files(config_dir[index + 1:],
+ config_name_suffix))
+
+ if not config_files:
+        logging.error('no config files found in %s or its plugin dirs',
+                      config_dir)
+ for path in config_files:
+ logging.debug('load config from %s', path)
+ config_globals = {}
+ config_globals.update(env_globals)
+ config_locals = {}
+ config_locals.update(env_locals)
+ try:
+ execfile(path, config_globals, config_locals)
+ except Exception as error:
+ logging.exception(error)
+ raise error
+ configs.append(config_locals)
+ return configs
+
+
+def pretty_print(*contents):
+ """pretty print contents."""
+ if len(contents) == 0:
+ print ""
+ else:
+ print "\n".join(content for content in contents)
+
+
+def get_switch_machines_from_file(filename):
+ """get switch machines from file."""
+ switches = []
+ switch_machines = {}
+ with open(filename) as switch_file:
+ for line in switch_file:
+ line = line.strip()
+ if not line:
+ # ignore empty line
+ continue
+
+ if line.startswith('#'):
+ # ignore comments
+ continue
+
+ columns = [column for column in line.split(',')]
+ if not columns:
+ # ignore empty line
+ continue
+
+ if columns[0] == 'switch':
+ (switch_ip, switch_vendor, switch_version,
+ switch_community, switch_state) = columns[1:]
+ switches.append({
+ 'ip': switch_ip,
+ 'vendor': switch_vendor,
+ 'credentials': {
+ 'version': switch_version,
+ 'community': switch_community,
+ },
+ 'state': switch_state,
+ })
+ elif columns[0] == 'machine':
+ switch_ip, switch_port, mac = columns[1:]
+ switch_machines.setdefault(switch_ip, []).append({
+ 'mac': mac,
+ 'port': switch_port,
+ })
+
+ return (switches, switch_machines)
+
+
+def execute_cli_by_ssh(cmd, host, username, password=None,
+ keyfile='/root/.ssh/id_rsa', nowait=False):
+ """SSH to execute script on remote machine
+
+ :param host: ip of the remote machine
+ :param username: username to access the remote machine
+ :param password: password to access the remote machine
+ :param cmd: command to execute
+
+ """
+ if not cmd:
+ logging.error("No command found!")
+ raise Exception('No command found!')
+
+ if nowait:
+ cmd = "nohup %s >/dev/null 2>&1 &" % cmd
+
+    client = None
+    stdin = None
+    stdout = None
+    stderr = None
+ try:
+ import paramiko
+ from paramiko import ssh_exception
+
+ client = paramiko.SSHClient()
+ client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+ if password:
+ client.connect(host, username=username, password=password)
+ else:
+ client.load_system_host_keys()
+ client.connect(
+ host, username=username,
+ key_filename=keyfile, look_for_keys=True
+ )
+ stdin, stdout, stderr = client.exec_command(cmd)
+ result = stdout.readlines()
+ logging.info("result of command '%s' is '%s'!" % (cmd, result))
+ return result
+
+ except ImportError:
+ err_msg = "Cannot find Paramiko package!"
+ logging.error(err_msg)
+ raise ImportError(err_msg)
+
+ except (ssh_exception.BadHostKeyException,
+ ssh_exception.AuthenticationException,
+ ssh_exception.SSHException):
+
+ err_msg = 'SSH connection error or command execution failed!'
+ logging.error(err_msg)
+ raise Exception(err_msg)
+
+ except Exception as exc:
+ logging.error(
+ 'Failed to execute command "%s", exception is %s' % (cmd, exc)
+ )
+ raise Exception(exc)
+
+ finally:
+ for resource in [stdin, stdout, stderr]:
+ if resource:
+ resource.close()
+
+        if client:
+            client.close()
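Quick usage sketches for the helpers above, assuming the module is
importable as compass.utils.util:

    from compass.utils import util

    util.parse_time_interval('3d 2h')
    # -> 266400.0 (seconds)

    util.merge_dict({'a': {'x': 1}}, {'a': {'y': 2}})
    # -> {'a': {'x': 1, 'y': 2}}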