author     Harry Huang <huangxiangyu5@huawei.com>  2017-11-01 11:56:50 +0800
committer  Harry Huang <huangxiangyu5@huawei.com>  2017-11-03 11:48:46 +0800
commit     905b0231e93ce2409a45dd6c4f5f983689fdb790 (patch)
tree       8b3c8c78773194e048072368fe793135a05e44f1
parent     3656ab7b5e3f2f26f7c98f9dcc97b3c461fa2a76 (diff)
Add compass-deck
RESTful API and DB Handlers for Compass

Change-Id: I1ce411f279943764c286ea48dca9185d453cf254
Signed-off-by: Harry Huang <huangxiangyu5@huawei.com>
-rw-r--r--  compass-deck/Dockerfile  9
-rw-r--r--  compass-deck/README.md  2
-rw-r--r--  compass-deck/actions/__init__.py  13
-rw-r--r--  compass-deck/actions/clean.py  192
-rw-r--r--  compass-deck/actions/cli.py  179
-rw-r--r--  compass-deck/actions/install_callback.py  181
-rw-r--r--  compass-deck/actions/poll_switch.py  162
-rw-r--r--  compass-deck/actions/update_progress.py  298
-rw-r--r--  compass-deck/actions/util.py  342
-rw-r--r--  compass-deck/api/__init__.py  42
-rw-r--r--  compass-deck/api/api.  0
-rw-r--r--  compass-deck/api/api.py  3391
-rw-r--r--  compass-deck/api/api.raml  4027
-rw-r--r--  compass-deck/api/auth_handler.py  49
-rw-r--r--  compass-deck/api/exception_handler.py  92
-rw-r--r--  compass-deck/api/utils.py  35
-rw-r--r--  compass-deck/api/v1/__init__.py  0
-rw-r--r--  compass-deck/api/v1/api.py  248
-rw-r--r--  compass-deck/apiclient/__init__.py  0
-rwxr-xr-x  compass-deck/apiclient/example.py  463
-rw-r--r--  compass-deck/apiclient/restful.py  1102
-rw-r--r--  compass-deck/apiclient/v1/__init__.py  0
-rwxr-xr-x  compass-deck/apiclient/v1/example.py  305
-rw-r--r--  compass-deck/apiclient/v1/restful.py  655
-rw-r--r--  compass-deck/bin/README.md  66
-rwxr-xr-x  compass-deck/bin/ansible_callbacks/playbook_done.py  96
-rwxr-xr-x  compass-deck/bin/chef/addcookbooks.py  54
-rwxr-xr-x  compass-deck/bin/chef/adddatabags.py  71
-rwxr-xr-x  compass-deck/bin/chef/addroles.py  62
-rwxr-xr-x  compass-deck/bin/chef/clean_clients.sh  6
-rwxr-xr-x  compass-deck/bin/chef/clean_environments.sh  13
-rwxr-xr-x  compass-deck/bin/chef/clean_nodes.sh  6
-rwxr-xr-x  compass-deck/bin/clean_installation_logs.py  50
-rwxr-xr-x  compass-deck/bin/clean_installers.py  163
-rwxr-xr-x  compass-deck/bin/client.py  1006
-rwxr-xr-x  compass-deck/bin/client.sh  2
-rwxr-xr-x  compass-deck/bin/cobbler/remove_systems.sh  9
-rwxr-xr-x  compass-deck/bin/compass_check.py  31
-rwxr-xr-x  compass-deck/bin/compass_wsgi.py  42
-rwxr-xr-x  compass-deck/bin/compassd  43
-rwxr-xr-x  compass-deck/bin/csvdeploy.py  333
-rwxr-xr-x  compass-deck/bin/delete_clusters.py  73
-rwxr-xr-x  compass-deck/bin/manage_db.py  165
-rwxr-xr-x  compass-deck/bin/poll_switch.py  113
-rwxr-xr-x  compass-deck/bin/progress_update.py  72
-rwxr-xr-x  compass-deck/bin/query_switch.py  143
-rwxr-xr-x  compass-deck/bin/refresh.sh  3
-rwxr-xr-x  compass-deck/bin/refresh_agent.sh  22
-rwxr-xr-x  compass-deck/bin/refresh_server.sh  22
-rwxr-xr-x  compass-deck/bin/runserver.py  37
-rwxr-xr-x  compass-deck/bin/switch_virtualenv.py  30
-rwxr-xr-x  compass-deck/build.sh  66
-rw-r--r--  compass-deck/db/__init__.py  13
-rw-r--r--  compass-deck/db/api/__init__.py  13
-rw-r--r--  compass-deck/db/api/adapter.py  313
-rw-r--r--  compass-deck/db/api/adapter_holder.py  155
-rw-r--r--  compass-deck/db/api/cluster.py  2444
-rw-r--r--  compass-deck/db/api/database.py  264
-rw-r--r--  compass-deck/db/api/health_check_report.py  190
-rw-r--r--  compass-deck/db/api/host.py  1120
-rw-r--r--  compass-deck/db/api/machine.py  317
-rw-r--r--  compass-deck/db/api/metadata.py  517
-rw-r--r--  compass-deck/db/api/metadata_holder.py  731
-rw-r--r--  compass-deck/db/api/network.py  160
-rw-r--r--  compass-deck/db/api/permission.py  357
-rw-r--r--  compass-deck/db/api/switch.py  1213
-rw-r--r--  compass-deck/db/api/user.py  553
-rw-r--r--  compass-deck/db/api/user_log.py  82
-rw-r--r--  compass-deck/db/api/utils.py  1286
-rw-r--r--  compass-deck/db/callback.py  204
-rw-r--r--  compass-deck/db/config_validation/__init__.py  0
-rw-r--r--  compass-deck/db/config_validation/default_validator.py  131
-rw-r--r--  compass-deck/db/config_validation/extension/__init__.py  0
-rw-r--r--  compass-deck/db/config_validation/extension/openstack.py  18
-rw-r--r--  compass-deck/db/exception.py  116
-rw-r--r--  compass-deck/db/models.py  1924
-rw-r--r--  compass-deck/db/v1/model.py  724
-rw-r--r--  compass-deck/db/validator.py  195
-rw-r--r--  compass-deck/deployment/__init__.py  15
-rw-r--r--  compass-deck/deployment/deploy_manager.py  237
-rw-r--r--  compass-deck/deployment/installers/__init__.py  21
-rw-r--r--  compass-deck/deployment/installers/config_manager.py  527
-rw-r--r--  compass-deck/deployment/installers/installer.py  291
-rw-r--r--  compass-deck/deployment/installers/os_installers/__init__.py  13
-rw-r--r--  compass-deck/deployment/installers/os_installers/cobbler/__init__.py  13
-rw-r--r--  compass-deck/deployment/installers/os_installers/cobbler/cobbler.py  449
-rw-r--r--  compass-deck/deployment/installers/pk_installers/__init__.py  13
-rw-r--r--  compass-deck/deployment/installers/pk_installers/ansible_installer/__init__.py  0
-rw-r--r--  compass-deck/deployment/installers/pk_installers/ansible_installer/ansible_installer.py  401
-rw-r--r--  compass-deck/deployment/utils/__init__.py  15
-rw-r--r--  compass-deck/deployment/utils/constants.py  84
-rw-r--r--  compass-deck/misc/Dockerfile  86
-rw-r--r--  compass-deck/misc/adapter_changes/Debian.yml  18
-rw-r--r--  compass-deck/misc/adapter_changes/HA-ansible-multinodes.yml  239
-rw-r--r--  compass-deck/misc/adapter_changes/keystone_install.yml  74
-rw-r--r--  compass-deck/misc/adapter_changes/preseed_post_anamon_local  80
-rw-r--r--  compass-deck/misc/adapter_changes/preseed_post_anamon_remote  80
-rw-r--r--  compass-deck/misc/apache/README  15
-rw-r--r--  compass-deck/misc/apache/cobbler_web.conf  10
-rw-r--r--  compass-deck/misc/apache/http_pip.conf  9
-rw-r--r--  compass-deck/misc/apache/images.conf  9
-rw-r--r--  compass-deck/misc/apache/ods-server.conf  18
-rw-r--r--  compass-deck/misc/apache/packages.conf  9
-rw-r--r--  compass-deck/misc/apache/ssl.conf  221
-rw-r--r--  compass-deck/misc/chef-server/chef-server.rb  4
-rwxr-xr-x  compass-deck/misc/ci/prepare_node_compass.sh  28
-rwxr-xr-x  compass-deck/misc/ci/pxe-deploy.sh  14
-rwxr-xr-x  compass-deck/misc/ci/pxe-prepare.sh  29
-rwxr-xr-x  compass-deck/misc/ci/tempest_run.sh  81
-rwxr-xr-x  compass-deck/misc/ci/test-install.sh  22
-rw-r--r--  compass-deck/misc/compass_install.repo  5
-rw-r--r--  compass-deck/misc/hosts  3
-rw-r--r--  compass-deck/misc/logrotate.d/httpd  9
-rw-r--r--  compass-deck/misc/logrotate.d/ntp  9
-rw-r--r--  compass-deck/misc/logrotate.d/squid  9
-rw-r--r--  compass-deck/misc/logrotate.d/syslog  13
-rw-r--r--  compass-deck/misc/logrotate.d/yum  7
-rw-r--r--  compass-deck/misc/logstash-forwarder/logstash-forwarder.conf  57
-rw-r--r--  compass-deck/misc/logstash-forwarder/logstash-forwarder.crt  29
-rw-r--r--  compass-deck/misc/logstash-forwarder/logstash-forwarder.repo  6
-rw-r--r--  compass-deck/misc/ntp/ntp.conf  60
-rw-r--r--  compass-deck/misc/rsync  14
-rw-r--r--  compass-deck/misc/rsyslog/rsyslog.conf  97
-rw-r--r--  compass-deck/misc/snmp/snmp.conf  1
-rw-r--r--  compass-deck/misc/squid/squid.conf  71
-rw-r--r--  compass-deck/requirements.txt  24
-rw-r--r--  compass-deck/setup.py  98
-rwxr-xr-x  compass-deck/start.sh  7
-rw-r--r--  compass-deck/tasks/__init__.py  13
-rw-r--r--  compass-deck/tasks/client.py  33
-rw-r--r--  compass-deck/tasks/tasks.py  326
-rw-r--r--  compass-deck/utils/__init__.py  13
-rw-r--r--  compass-deck/utils/celeryconfig_wrapper.py  44
-rw-r--r--  compass-deck/utils/daemonize.py  76
-rw-r--r--  compass-deck/utils/flags.py  91
-rw-r--r--  compass-deck/utils/logsetting.py  108
-rw-r--r--  compass-deck/utils/setting_wrapper.py  175
-rw-r--r--  compass-deck/utils/util.py  395
138 files changed, 32544 insertions, 0 deletions
diff --git a/compass-deck/Dockerfile b/compass-deck/Dockerfile
new file mode 100644
index 0000000..764e7f1
--- /dev/null
+++ b/compass-deck/Dockerfile
@@ -0,0 +1,9 @@
+FROM huangxiangyu/centos-systemd
+
+ADD . /root/compass-deck
+
+RUN /root/compass-deck/build.sh
+
+EXPOSE 80
+
+CMD ["/sbin/init", "/usr/local/bin/start.sh"]
diff --git a/compass-deck/README.md b/compass-deck/README.md
new file mode 100644
index 0000000..c17dbe1
--- /dev/null
+++ b/compass-deck/README.md
@@ -0,0 +1,2 @@
+# compass-deck
+RESTful API for Compass
diff --git a/compass-deck/actions/__init__.py b/compass-deck/actions/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-deck/actions/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/actions/clean.py b/compass-deck/actions/clean.py
new file mode 100644
index 0000000..8cb00b5
--- /dev/null
+++ b/compass-deck/actions/clean.py
@@ -0,0 +1,192 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to clean installers
+"""
+import chef
+import logging
+import xmlrpclib
+
+from compass.actions import util
+
+
+class CobblerInstaller(object):
+ """cobbler installer"""
+ CREDENTIALS = "credentials"
+ USERNAME = 'username'
+ PASSWORD = 'password'
+
+ INSTALLER_URL = "cobbler_url"
+
+ def __init__(self, settings):
+ username = settings[self.CREDENTIALS][self.USERNAME]
+ password = settings[self.CREDENTIALS][self.PASSWORD]
+ cobbler_url = settings[self.INSTALLER_URL]
+ try:
+ self.remote = xmlrpclib.Server(cobbler_url)
+ self.token = self.remote.login(username, password)
+ logging.info('cobbler %s client created', cobbler_url)
+ except Exception as error:
+ logging.error(
+ 'failed to login %s with (%s, %s)',
+ cobbler_url, username, password
+ )
+ logging.exception(error)
+
+ def clean(self):
+ systems = self.remote.get_systems()
+ for system in systems:
+ system_name = system['name']
+ try:
+ self.remote.remove_system(system_name, self.token)
+ logging.info('system %s is removed', system_name)
+ except Exception as error:
+ logging.error(
+ 'failed to remove system %s', system_name
+ )
+ logging.exception(error)
+
+
+class AnsibleInstaller(object):
+
+ def __init__(self, settings):
+ return
+
+ def clean(self):
+ pass
+
+
+class ChefInstaller(object):
+ DATABAGS = "databags"
+ CHEFSERVER_URL = "chef_url"
+ CHEFSERVER_DNS = "chef_server_dns"
+ CHEFSERVER_IP = "chef_server_ip"
+ KEY_DIR = "key_dir"
+ CLIENT = "client_name"
+
+ def __init__(self, settings):
+ installer_url = settings.get(self.CHEFSERVER_URL, None)
+ key_dir = settings.get(self.KEY_DIR, None)
+ client = settings.get(self.CLIENT, None)
+ try:
+ if installer_url and key_dir and client:
+ self.api = chef.ChefAPI(installer_url, key_dir, client)
+ else:
+ self.api = chef.autoconfigure()
+ logging.info(
+ 'chef client created %s(%s, %s)',
+ installer_url, key_dir, client
+ )
+ except Exception as error:
+ logging.error(
+ 'failed to create chef client %s(%s, %s)',
+ installer_url, key_dir, client
+ )
+ logging.exception(error)
+
+ def clean(self):
+ try:
+ for node_name in chef.Node.list(api=self.api):
+ node = chef.Node(node_name, api=self.api)
+ node.delete()
+ logging.info('delete node %s', node_name)
+ except Exception as error:
+ logging.error('failed to delete some nodes')
+ logging.exception(error)
+
+ try:
+ for client_name in chef.Client.list(api=self.api):
+ if client_name in ['chef-webui', 'chef-validator']:
+ continue
+ client = chef.Client(client_name, api=self.api)
+ client.delete()
+ logging.info('delete client %s', client_name)
+ except Exception as error:
+ logging.error('failed to delete some clients')
+ logging.exception(error)
+
+ try:
+ for env_name in chef.Environment.list(api=self.api):
+ if env_name == '_default':
+ continue
+ env = chef.Environment(env_name, api=self.api)
+ env.delete()
+ logging.info('delete env %s', env_name)
+ except Exception as error:
+ logging.error('failed to delete some envs')
+ logging.exception(error)
+
+ try:
+ for databag_name in chef.DataBag.list(api=self.api):
+ databag = chef.DataBag(databag_name, api=self.api)
+ for item_name, item in databag.items():
+ item.delete()
+ logging.info(
+ 'delete item %s from databag %s',
+ item_name, databag_name
+ )
+ except Exception as error:
+ logging.error('failed to delete some databag items')
+ logging.exception(error)
+
+
+OS_INSTALLERS = {
+ 'cobbler': CobblerInstaller
+}
+PK_INSTALLERS = {
+ 'chef_installer': ChefInstaller,
+ 'ansible_installer': AnsibleInstaller
+}
+
+
+def clean_os_installer(
+ os_installer_name, os_installer_settings
+):
+ with util.lock('serialized_action', timeout=100) as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to clean os installer'
+ )
+
+        if os_installer_name not in OS_INSTALLERS:
+            logging.error(
+                '%s not found in os_installers', os_installer_name
+            )
+            return
+
+ os_installer = OS_INSTALLERS[os_installer_name](
+ os_installer_settings
+ )
+ os_installer.clean()
+
+
+def clean_package_installer(
+ package_installer_name, package_installer_settings
+):
+ with util.lock('serialized_action', timeout=100) as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to clean package installer'
+ )
+
+        if package_installer_name not in PK_INSTALLERS:
+            logging.error(
+                '%s not found in package_installers', package_installer_name
+            )
+            return
+
+ package_installer = PK_INSTALLERS[package_installer_name](
+ package_installer_settings
+ )
+ package_installer.clean()
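
For reference, a minimal sketch of how a caller could drive these cleaners, assuming a local Redis server for util.lock and a reachable cobbler XML-RPC endpoint; the URL and credentials below are illustrative, not part of the commit:

    from compass.actions import clean

    # Illustrative settings; the keys mirror what CobblerInstaller.__init__
    # reads (credentials.username, credentials.password, cobbler_url).
    cobbler_settings = {
        'credentials': {'username': 'cobbler', 'password': 'cobbler'},
        'cobbler_url': 'http://127.0.0.1/cobbler_api',
    }
    # Removes every system registered on the cobbler server.
    clean.clean_os_installer('cobbler', cobbler_settings)

    # With empty settings, ChefInstaller falls back to chef.autoconfigure().
    clean.clean_package_installer('chef_installer', {})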
diff --git a/compass-deck/actions/cli.py b/compass-deck/actions/cli.py
new file mode 100644
index 0000000..c9058ed
--- /dev/null
+++ b/compass-deck/actions/cli.py
@@ -0,0 +1,179 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass Command Line Interface"""
+import logging
+import subprocess
+import sys
+
+from compass.actions.health_check import check
+from compass.db.api import database
+
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils.util import pretty_print
+
+
+ACTION_MAP = {
+ "check": "apache celery dhcp dns hds misc os_installer "
+ "package_installer squid tftp".split(" "),
+ "refresh": "db sync".split(" "),
+}
+
+
+class BootCLI(object):
+ """CLI to do compass check."""
+
+ def __init__(self):
+ return
+
+ def run(self, args):
+ """cli takes the commands and calls respective modules."""
+ action = self.get_action(args)
+ if action is None:
+ self.print_help()
+ else:
+ module = self.get_module(action, args)
+ if module == "invalid":
+ self.print_help(action)
+ else:
+ method = "self.run_" + action + "(module)"
+ eval(method)
+
+ @classmethod
+ def get_action(cls, args):
+ """This method returns an action type.
+
+ .. note::
+ For 'compass check dhcp' command, it will return 'check'.
+ """
+ if len(args) == 1:
+ return None
+ elif args[1] in ACTION_MAP.keys():
+ return args[1]
+ return None
+
+ @classmethod
+ def get_module(cls, action, args):
+ """This method returns a module.
+
+ .. note::
+ For 'compass check dhcp' command, it will return 'dhcp'.
+ """
+ if len(args) <= 2:
+ return None
+ elif args[2] in ACTION_MAP[action]:
+ return args[2]
+ return "invalid"
+
+ def run_check(self, module=None):
+ """This provides a flexible sanity check.
+
+ .. note::
+            The module parameter defaults to None. If module is None,
+            Compass checks all modules; otherwise it checks only the
+            given module.
+ """
+ if module is None:
+ pretty_print("Starting: Compass Health Check",
+ "==============================")
+ chk = check.BootCheck()
+ res = chk.run()
+ self.output_check_result(res)
+
+ else:
+ pretty_print("Checking Module: %s" % module,
+ "============================")
+ chk = check.BootCheck()
+ method = "chk._check_" + module + "()"
+ res = eval(method)
+ print "\n".join(msg for msg in res[1])
+
+ @classmethod
+ def output_check_result(cls, result):
+ """output check result."""
+ if result == {}:
+ return
+ pretty_print("\n",
+ "===============================",
+ "* Compass Health Check Report *",
+ "===============================")
+ successful = True
+ for key in result.keys():
+ if result[key][0] == 0:
+ successful = False
+ print "%s" % "\n".join(item for item in result[key][1])
+
+ print "===================="
+ if successful is True:
+ print "Compass Check completes. No problems found, all systems go"
+ sys.exit(0)
+ else:
+ print (
+ "Compass has ERRORS shown above. Please fix them before "
+ "deploying!")
+ sys.exit(1)
+
+ @classmethod
+ def run_refresh(cls, action=None):
+ """Run refresh."""
+ # TODO(xicheng): replace refresh.sh with refresh.py
+ if action is None:
+ pretty_print("Refreshing Compass...",
+ "=================")
+ subprocess.Popen(
+ ['/opt/compass/bin/refresh.sh'], shell=True)
+ elif action == "db":
+ pretty_print("Refreshing Compass Database...",
+ "===================")
+ subprocess.Popen(
+ ['/opt/compass/bin/manage_db.py createdb'], shell=True)
+ else:
+ pretty_print("Syncing with Installers...",
+ "================")
+ subprocess.Popen(
+ ['/opt/compass/bin/manage_db.py sync_from_installers'],
+ shell=True
+ )
+
+ @classmethod
+ def print_help(cls, module_help=""):
+ """print help."""
+ if module_help == "":
+ pretty_print("usage\n=====",
+ "compass <refresh|check>",
+ "type 'compass {action} --help' for detailed "
+ "command list")
+
+ elif module_help == "refresh":
+ pretty_print("usage\n=====",
+ "compass refresh [%s]" %
+ "|".join(action for action in ACTION_MAP['refresh']))
+
+ else:
+ pretty_print("usage\n=====",
+ "compass check [%s]" %
+ "|".join(action for action in ACTION_MAP['check']))
+ sys.exit(2)
+
+
+def main():
+ """Compass cli entry point."""
+ flags.init()
+ logsetting.init()
+ database.init()
+ cli = BootCLI()
+ output = cli.run(sys.argv)
+ return sys.exit(output)
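
As a dispatch sketch: for argv ['compass', 'check', 'dhcp'], get_action() returns 'check', get_module() returns 'dhcp', and run() ends up evaluating self.run_check('dhcp'). A hedged usage example (running the real checks requires a deployed compass environment):

    from compass.actions.cli import BootCLI

    cli = BootCLI()
    # Equivalent to typing `compass check dhcp` in a shell.
    cli.run(['compass', 'check', 'dhcp'])
    # An unknown module maps to "invalid", so this prints the check
    # usage text and exits with status 2.
    cli.run(['compass', 'check', 'bogus'])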
diff --git a/compass-deck/actions/install_callback.py b/compass-deck/actions/install_callback.py
new file mode 100644
index 0000000..aae955a
--- /dev/null
+++ b/compass-deck/actions/install_callback.py
@@ -0,0 +1,181 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to receive installation callback.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import cluster as cluster_api
+from compass.db.api import host as host_api
+from compass.db.api import user as user_db
+from compass.deployment.deploy_manager import DeployManager
+from compass.deployment.utils import constants as const
+
+
+def os_installed(
+ host_id, clusterhosts_ready, clusters_os_ready,
+ username=None
+):
+ """Callback when os is installed.
+
+    :param host_id: the host that the os is installed on.
+ :type host_id: integer
+ :param clusterhosts_ready: the clusterhosts that should trigger ready.
+ :param clusters_os_ready: the cluster that should trigger os ready.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to '
+ 'do the post action after os installation'
+ )
+ logging.info(
+ 'os installed on host %s '
+ 'with cluster host ready %s cluster os ready %s',
+ host_id, clusterhosts_ready, clusters_os_ready
+ )
+ if username:
+ user = user_db.get_user_object(username)
+ else:
+ user = None
+ os_installed_triggered = False
+ for cluster_id, clusterhost_ready in clusterhosts_ready.items():
+ if not clusterhost_ready and os_installed_triggered:
+ continue
+
+ cluster_info = util.ActionHelper.get_cluster_info(
+ cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, [host_id], user)
+
+ deploy_manager = DeployManager(
+ adapter_info, cluster_info, hosts_info)
+
+ if not os_installed_triggered:
+ deploy_manager.os_installed()
+ util.ActionHelper.host_ready(host_id, True, user)
+ os_installed_triggered = True
+
+ if clusterhost_ready:
+ # deploy_manager.cluster_os_installed()
+ util.ActionHelper.cluster_host_ready(
+ cluster_id, host_id, False, user
+ )
+
+ if util.ActionHelper.is_cluster_os_ready(cluster_id, user):
+ logging.info("deploy_manager begin cluster_os_installed")
+ deploy_manager.cluster_os_installed()
+
+
+def package_installed(
+ cluster_id, host_id, cluster_ready,
+ host_ready, username=None
+):
+ """Callback when package is installed.
+
+ :param cluster_id: cluster id.
+ :param host_id: host id.
+ :param cluster_ready: if the cluster should trigger ready.
+ :param host_ready: if the host should trigger ready.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to '
+ 'do the post action after package installation'
+ )
+ logging.info(
+ 'package installed on cluster %s host %s '
+ 'with cluster ready %s host ready %s',
+ cluster_id, host_id, cluster_ready, host_ready
+ )
+
+ if username:
+ user = user_db.get_user_object(username)
+ else:
+ user = None
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, [host_id], user)
+
+ deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
+
+ deploy_manager.package_installed()
+ util.ActionHelper.cluster_host_ready(cluster_id, host_id, True, user)
+ if cluster_ready:
+ util.ActionHelper.cluster_ready(cluster_id, False, user)
+ if host_ready:
+ util.ActionHelper.host_ready(host_id, False, user)
+
+
+def cluster_installed(
+ cluster_id, clusterhosts_ready,
+ username=None
+):
+ """Callback when cluster is installed.
+
+ :param cluster_id: cluster id
+ :param clusterhosts_ready: clusterhosts that should trigger ready.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to '
+ 'do the post action after cluster installation'
+ )
+ logging.info(
+            'cluster installed on cluster %s with clusterhosts ready %s',
+ cluster_id, clusterhosts_ready
+ )
+ if username:
+ user = user_db.get_user_object(username)
+ else:
+ user = None
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, clusterhosts_ready.keys(), user)
+
+ deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
+
+ deploy_manager.cluster_installed()
+ util.ActionHelper.cluster_ready(cluster_id, True, user)
+ for host_id, clusterhost_ready in clusterhosts_ready.items():
+ if clusterhost_ready:
+ util.ActionHelper.cluster_host_ready(
+ cluster_id, host_id, False, user
+ )
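
A sketch of how an installer-side caller might fire these callbacks; the ids and username below are invented for illustration:

    from compass.actions import install_callback

    # Host 1 finished OS installation. Cluster 2 should also flip its
    # clusterhost to ready; cluster 3 should check cluster-level os ready.
    install_callback.os_installed(
        1,
        clusterhosts_ready={2: True, 3: False},
        clusters_os_ready={2: False, 3: True},
        username='admin@huawei.com'
    )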
diff --git a/compass-deck/actions/poll_switch.py b/compass-deck/actions/poll_switch.py
new file mode 100644
index 0000000..5c29b01
--- /dev/null
+++ b/compass-deck/actions/poll_switch.py
@@ -0,0 +1,162 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provider function to poll switch."""
+import logging
+import netaddr
+
+from compass.actions import util
+from compass.db.api import database
+from compass.db.api import switch as switch_api
+from compass.db.api import user as user_api
+from compass.hdsdiscovery.hdmanager import HDManager
+
+
+def _poll_switch(ip_addr, credentials, req_obj='mac', oper="SCAN"):
+ """Poll switch by ip addr.
+
+
+ Args:
+ ip_addr: ip addr of the switch.
+ credentials: credentials of the switch.
+
+ Returns: switch attributes dict and list of machine attributes dict.
+ """
+ under_monitoring = 'under_monitoring'
+ unreachable = 'unreachable'
+ polling_error = 'error'
+ hdmanager = HDManager()
+ vendor, state, err_msg = hdmanager.get_vendor(ip_addr, credentials)
+ if not vendor:
+ logging.info("*****error_msg: %s****", err_msg)
+ logging.error('no vendor found or match switch %s', ip_addr)
+ return (
+ {
+ 'vendor': vendor, 'state': state, 'err_msg': err_msg
+ }, {
+ }
+ )
+
+ logging.debug(
+ 'hdmanager learn switch from %s', ip_addr
+ )
+ results = []
+ try:
+ results = hdmanager.learn(
+ ip_addr, credentials, vendor, req_obj, oper
+ )
+ except Exception as error:
+ logging.exception(error)
+ state = unreachable
+ err_msg = (
+            'SNMP walk for querying MAC addresses timed out'
+ )
+ return (
+ {
+ 'vendor': vendor, 'state': state, 'err_msg': err_msg
+ }, {
+ }
+ )
+
+ logging.info("pollswitch %s result: %s", ip_addr, results)
+ if not results:
+ logging.error(
+ 'no result learned from %s', ip_addr
+ )
+ state = polling_error
+ err_msg = 'No result learned from SNMP walk'
+ return (
+ {'vendor': vendor, 'state': state, 'err_msg': err_msg},
+ {}
+ )
+
+ logging.info('poll switch result: %s' % str(results))
+ machine_dicts = {}
+ for machine in results:
+ mac = machine['mac']
+ port = machine['port']
+ vlan = int(machine['vlan'])
+ if vlan:
+ vlans = [vlan]
+ else:
+ vlans = []
+ if mac not in machine_dicts:
+ machine_dicts[mac] = {'mac': mac, 'port': port, 'vlans': vlans}
+ else:
+ machine_dicts[mac]['port'] = port
+ machine_dicts[mac]['vlans'].extend(vlans)
+
+ logging.debug('update switch %s state to under monitoring', ip_addr)
+ state = under_monitoring
+ return (
+ {'vendor': vendor, 'state': state, 'err_msg': err_msg},
+ machine_dicts.values()
+ )
+
+
+def poll_switch(poller_email, ip_addr, credentials,
+ req_obj='mac', oper="SCAN"):
+ """Query switch and update switch machines.
+
+ .. note::
+        When polling the switch succeeds, a Machine record associated
+        with the switch is added to the database for each mac learned
+        from the switch.
+
+    :param ip_addr: switch ip address.
+    :type ip_addr: str
+    :param credentials: switch credentials.
+ :type credentials: dict
+ :param req_obj: the object requested to query from switch.
+ :type req_obj: str
+ :param oper: the operation to query the switch.
+ :type oper: str, should be one of ['SCAN', 'GET', 'SET']
+
+ .. note::
+ The function should be called out of database session scope.
+ """
+ poller = user_api.get_user_object(poller_email)
+ ip_int = long(netaddr.IPAddress(ip_addr))
+ with util.lock('poll switch %s' % ip_addr, timeout=120) as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to poll switch %s' % ip_addr
+ )
+
+ # TODO(grace): before repoll the switch, set the state to repolling.
+ # and when the poll switch is timeout, set the state to error.
+ # the frontend should only consider some main state like INTIALIZED,
+ # ERROR and SUCCESSFUL, REPOLLING is as an intermediate state to
+ # indicate the switch is in learning the mac of the machines connected
+ # to it.
+ logging.debug('poll switch: %s', ip_addr)
+ switch_dict, machine_dicts = _poll_switch(
+ ip_addr, credentials, req_obj=req_obj, oper=oper
+ )
+ switches = switch_api.list_switches(ip_int=ip_int, user=poller)
+ if not switches:
+ logging.error('no switch found for %s', ip_addr)
+ return
+
+ for switch in switches:
+ for machine_dict in machine_dicts:
+ logging.info('add machine: %s', machine_dict)
+ machine_dict['owner_id'] = poller.id
+ switch_api.add_switch_machine(
+ switch['id'], False, user=poller, **machine_dict
+ )
+ switch_api.update_switch(
+ switch['id'],
+ user=poller,
+ **switch_dict
+ )
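
A usage sketch with an invented switch address, assuming SNMP v2c credentials in the shape the hdsdiscovery layer expects; poller_email must belong to an existing compass user:

    from compass.actions import poll_switch

    poll_switch.poll_switch(
        'admin@huawei.com', '10.145.8.10',
        {'version': '2c', 'community': 'public'},
        req_obj='mac', oper='SCAN'
    )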
diff --git a/compass-deck/actions/update_progress.py b/compass-deck/actions/update_progress.py
new file mode 100644
index 0000000..67a9963
--- /dev/null
+++ b/compass-deck/actions/update_progress.py
@@ -0,0 +1,298 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to update status and installing progress of the given cluster.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import cluster as cluster_api
+from compass.db.api import host as host_api
+from compass.db.api import user as user_api
+from compass.log_analyzor import progress_calculator
+from compass.utils import setting_wrapper as setting
+
+
+def update_progress():
+ """Update status and installing progress of the given cluster.
+
+    .. note::
+        The function should be called out of the database session scope.
+        In the function, it will update the database cluster_state and
+        host_state tables for the deploying cluster and hosts.
+
+        The function will also query the log_progressing_history table to
+        get the latest installing progress and the position in the log it
+        has processed in the last run. The function uses this information
+        to avoid recalculating the progress from the beginning of the log
+        file. After the progress gets updated, this information is stored
+        back to log_progressing_history for the next run.
+ """
+ with util.lock('log_progressing', timeout=60, blocking=False) as lock:
+ if not lock:
+ logging.error(
+ 'failed to acquire lock to calculate installation progress'
+ )
+ return
+
+ logging.info('update installing progress')
+
+ user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
+ hosts = host_api.list_hosts(user=user)
+ host_mapping = {}
+ for host in hosts:
+ if 'id' not in host:
+ logging.error('id is not in host %s', host)
+ continue
+ host_id = host['id']
+ if 'os_name' not in host:
+ logging.error('os_name is not in host %s', host)
+ continue
+ if 'os_installer' not in host:
+ logging.error('os_installer is not in host %s', host)
+ continue
+ host_dirname = setting.HOST_INSTALLATION_LOGDIR_NAME
+ if host_dirname not in host:
+ logging.error(
+ '%s is not in host %s', host_dirname, host
+ )
+ continue
+ host_state = host_api.get_host_state(host_id, user=user)
+ if 'state' not in host_state:
+ logging.error('state is not in host state %s', host_state)
+ continue
+ if host_state['state'] == 'INSTALLING':
+ host_log_histories = host_api.get_host_log_histories(
+ host_id, user=user
+ )
+ host_log_history_mapping = {}
+ for host_log_history in host_log_histories:
+ if 'filename' not in host_log_history:
+ logging.error(
+ 'filename is not in host log history %s',
+ host_log_history
+ )
+ continue
+ host_log_history_mapping[
+ host_log_history['filename']
+ ] = host_log_history
+ host_mapping[host_id] = (
+ host, host_state, host_log_history_mapping
+ )
+ else:
+ logging.info(
+ 'ignore host state %s since it is not in installing',
+ host_state
+ )
+ adapters = adapter_api.list_adapters(user=user)
+ adapter_mapping = {}
+ for adapter in adapters:
+ if 'id' not in adapter:
+ logging.error(
+ 'id not in adapter %s', adapter
+ )
+ continue
+ if 'package_installer' not in adapter:
+ logging.info(
+ 'package_installer not in adapter %s', adapter
+ )
+ continue
+ adapter_id = adapter['id']
+ adapter_mapping[adapter_id] = adapter
+ clusters = cluster_api.list_clusters(user=user)
+ cluster_mapping = {}
+ for cluster in clusters:
+ if 'id' not in cluster:
+ logging.error('id not in cluster %s', cluster)
+ continue
+ cluster_id = cluster['id']
+ if 'adapter_id' not in cluster:
+ logging.error(
+ 'adapter_id not in cluster %s',
+ cluster
+ )
+ continue
+ cluster_state = cluster_api.get_cluster_state(
+ cluster_id,
+ user=user
+ )
+ if 'state' not in cluster_state:
+ logging.error('state not in cluster state %s', cluster_state)
+ continue
+ cluster_mapping[cluster_id] = (cluster, cluster_state)
+ clusterhosts = cluster_api.list_clusterhosts(user=user)
+ clusterhost_mapping = {}
+ for clusterhost in clusterhosts:
+ if 'clusterhost_id' not in clusterhost:
+ logging.error(
+ 'clusterhost_id not in clusterhost %s',
+ clusterhost
+ )
+ continue
+ clusterhost_id = clusterhost['clusterhost_id']
+ if 'cluster_id' not in clusterhost:
+ logging.error(
+ 'cluster_id not in clusterhost %s',
+ clusterhost
+ )
+ continue
+ cluster_id = clusterhost['cluster_id']
+ if cluster_id not in cluster_mapping:
+ logging.info(
+ 'ignore clusterhost %s '
+ 'since the cluster_id '
+ 'is not in cluster_mapping %s',
+ clusterhost, cluster_mapping
+ )
+ continue
+ cluster, _ = cluster_mapping[cluster_id]
+ if 'flavor_name' not in cluster:
+ logging.error(
+ 'flavor_name is not in clusterhost %s related cluster',
+ clusterhost
+ )
+ continue
+ clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME
+ if clusterhost_dirname not in clusterhost:
+ logging.error(
+ '%s is not in clusterhost %s',
+ clusterhost_dirname, clusterhost
+ )
+ continue
+ adapter_id = cluster['adapter_id']
+ if adapter_id not in adapter_mapping:
+ logging.info(
+ 'ignore clusterhost %s '
+ 'since the adapter_id %s '
+                'is not in adapter_mapping %s',
+ clusterhost, adapter_id, adapter_mapping
+ )
+ continue
+ adapter = adapter_mapping[adapter_id]
+ if 'package_installer' not in adapter:
+ logging.info(
+ 'ignore clusterhost %s '
+                'since the package_installer is not defined '
+ 'in adapter %s',
+ clusterhost, adapter
+ )
+ continue
+ package_installer = adapter['package_installer']
+ clusterhost['package_installer'] = package_installer
+ clusterhost['adapter_name'] = adapter['name']
+ clusterhost_state = cluster_api.get_clusterhost_self_state(
+ clusterhost_id, user=user
+ )
+ if 'state' not in clusterhost_state:
+ logging.error(
+ 'state not in clusterhost_state %s',
+ clusterhost_state
+ )
+ continue
+ if clusterhost_state['state'] == 'INSTALLING':
+ clusterhost_log_histories = (
+ cluster_api.get_clusterhost_log_histories(
+ clusterhost_id, user=user
+ )
+ )
+ clusterhost_log_history_mapping = {}
+ for clusterhost_log_history in clusterhost_log_histories:
+ if 'filename' not in clusterhost_log_history:
+ logging.error(
+ 'filename not in clusterhost_log_history %s',
+ clusterhost_log_history
+ )
+ continue
+ clusterhost_log_history_mapping[
+ clusterhost_log_history['filename']
+ ] = clusterhost_log_history
+ clusterhost_mapping[clusterhost_id] = (
+ clusterhost, clusterhost_state,
+ clusterhost_log_history_mapping
+ )
+ else:
+ logging.info(
+ 'ignore clusterhost state %s '
+ 'since it is not in installing',
+ clusterhost_state
+ )
+
+ progress_calculator.update_host_progress(
+ host_mapping)
+ for host_id, (host, host_state, host_log_history_mapping) in (
+ host_mapping.items()
+ ):
+ host_api.update_host_state(
+ host_id, user=user,
+ percentage=host_state.get('percentage', 0),
+ message=host_state.get('message', ''),
+ severity=host_state.get('severity', 'INFO')
+ )
+ for filename, host_log_history in (
+ host_log_history_mapping.items()
+ ):
+ host_api.add_host_log_history(
+ host_id, filename=filename, user=user,
+ position=host_log_history.get('position', 0),
+ percentage=host_log_history.get('percentage', 0),
+ partial_line=host_log_history.get('partial_line', ''),
+ message=host_log_history.get('message', ''),
+ severity=host_log_history.get('severity', 'INFO'),
+ line_matcher_name=host_log_history.get(
+ 'line_matcher_name', 'start'
+ )
+ )
+ progress_calculator.update_clusterhost_progress(
+ clusterhost_mapping)
+ for (
+ clusterhost_id,
+ (clusterhost, clusterhost_state, clusterhost_log_history_mapping)
+ ) in (
+ clusterhost_mapping.items()
+ ):
+ cluster_api.update_clusterhost_state(
+ clusterhost_id, user=user,
+ percentage=clusterhost_state.get('percentage', 0),
+ message=clusterhost_state.get('message', ''),
+ severity=clusterhost_state.get('severity', 'INFO')
+ )
+ for filename, clusterhost_log_history in (
+ clusterhost_log_history_mapping.items()
+ ):
+ cluster_api.add_clusterhost_log_history(
+ clusterhost_id, user=user, filename=filename,
+ position=clusterhost_log_history.get('position', 0),
+ percentage=clusterhost_log_history.get('percentage', 0),
+ partial_line=clusterhost_log_history.get(
+ 'partial_line', ''),
+ message=clusterhost_log_history.get('message', ''),
+ severity=clusterhost_log_history.get('severity', 'INFO'),
+ line_matcher_name=(
+ clusterhost_log_history.get(
+ 'line_matcher_name', 'start'
+ )
+ )
+ )
+ progress_calculator.update_cluster_progress(
+ cluster_mapping)
+ for cluster_id, (cluster, cluster_state) in cluster_mapping.items():
+ cluster_api.update_cluster_state(
+ cluster_id, user=user
+ )
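
All three mappings built above share one shape: an id mapped to a tuple of the resource, its state, and its log histories keyed by filename. An invented example of host_mapping, the structure consumed by progress_calculator.update_host_progress():

    # Invented values; real entries carry the full dicts returned by
    # host_api.list_hosts() and host_api.get_host_state().
    host_mapping = {
        1: (
            {'id': 1, 'os_name': 'CentOS-6.5-x86_64'},    # host
            {'state': 'INSTALLING', 'percentage': 0.3},   # host_state
            {'sys.log': {'filename': 'sys.log', 'position': 1024}},
        ),
    }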
diff --git a/compass-deck/actions/util.py b/compass-deck/actions/util.py
new file mode 100644
index 0000000..4d9f855
--- /dev/null
+++ b/compass-deck/actions/util.py
@@ -0,0 +1,342 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provide util for actions
+
+ .. moduleauthor:: Xiaodong Wang ,xiaodongwang@huawei.com>
+"""
+import logging
+import redis
+
+from contextlib import contextmanager
+
+from compass.db.api import adapter_holder as adapter_db
+from compass.db.api import cluster as cluster_db
+from compass.db.api import host as host_db
+from compass.db.api import machine as machine_db
+from compass.deployment.utils import constants as const
+
+
+@contextmanager
+def lock(lock_name, blocking=True, timeout=10):
+ """acquire a lock to do some actions.
+
+ The lock is acquired by lock_name among the whole distributed
+ systems.
+ """
+ # TODO(xicheng): in future we should explicitly told which redis
+ # server we want to talk to make the lock works on distributed
+ # systems.
+ redis_instance = redis.Redis()
+ instance_lock = redis_instance.lock(lock_name, timeout=timeout)
+ owned = False
+ try:
+ locked = instance_lock.acquire(blocking=blocking)
+ if locked:
+ owned = True
+ logging.debug('acquired lock %s', lock_name)
+ yield instance_lock
+ else:
+            logging.info('lock %s is already held', lock_name)
+ yield None
+
+ except Exception as error:
+ logging.info(
+ 'redis fails to acquire the lock %s', lock_name)
+ logging.exception(error)
+ yield None
+
+ finally:
+ if owned:
+ instance_lock.acquired_until = 0
+ instance_lock.release()
+ logging.debug('released lock %s', lock_name)
+ else:
+ logging.debug('nothing to release %s', lock_name)
+
+
+class ActionHelper(object):
+
+ @staticmethod
+ def get_adapter_info(adapter_id, cluster_id, user):
+ """Get adapter information. Return a dictionary as below,
+
+ {
+ "id": 1,
+ "name": "xxx",
+ "flavors": [
+ {
+ "flavor_name": "xxx",
+ "roles": ['xxx', 'yyy', ...],
+ "template": "xxx.tmpl"
+ },
+ ...
+ ],
+ "metadata": {
+ "os_config": {
+ ...
+ },
+ "package_config": {
+ ...
+ }
+ },
+ "os_installer": {
+ "name": "cobbler",
+ "settings": {....}
+ },
+ "pk_installer": {
+ "name": "chef",
+ "settings": {....}
+ },
+ ...
+ }
+ To view a complete output, please refer to backend doc.
+ """
+
+ adapter_info = adapter_db.get_adapter(adapter_id, user=user)
+ metadata = cluster_db.get_cluster_metadata(cluster_id, user=user)
+ adapter_info.update({const.METADATA: metadata})
+
+ for flavor_info in adapter_info[const.FLAVORS]:
+ roles = flavor_info[const.ROLES]
+ flavor_info[const.ROLES] = ActionHelper._get_role_names(roles)
+
+ return adapter_info
+
+ @staticmethod
+ def _get_role_names(roles):
+ return [role[const.NAME] for role in roles]
+
+ @staticmethod
+ def get_cluster_info(cluster_id, user):
+ """Get cluster information.Return a dictionary as below,
+
+ {
+ "id": 1,
+ "adapter_id": 1,
+ "os_version": "CentOS-6.5-x86_64",
+ "name": "cluster_01",
+ "flavor": {
+ "flavor_name": "zzz",
+ "template": "xx.tmpl",
+ "roles": [...]
+            },
+ "os_config": {..},
+ "package_config": {...},
+ "deployed_os_config": {},
+ "deployed_package_config": {},
+ "owner": "xxx"
+ }
+ """
+
+ cluster_info = cluster_db.get_cluster(cluster_id, user=user)
+
+ # convert roles retrieved from db into a list of role names
+ roles_info = cluster_info.setdefault(
+ const.FLAVOR, {}).setdefault(const.ROLES, [])
+ cluster_info[const.FLAVOR][const.ROLES] = \
+ ActionHelper._get_role_names(roles_info)
+
+ # get cluster config info
+ cluster_config = cluster_db.get_cluster_config(cluster_id, user=user)
+ cluster_info.update(cluster_config)
+
+ deploy_config = cluster_db.get_cluster_deployed_config(cluster_id,
+ user=user)
+ cluster_info.update(deploy_config)
+
+ return cluster_info
+
+ @staticmethod
+ def get_hosts_info(cluster_id, hosts_id_list, user):
+ """Get hosts information. Return a dictionary as below,
+
+        {
+            "hosts": {
+                1($host_id): {
+                    "reinstall_os": True,
+                    "mac": "xxx",
+                    "name": "xxx",
+                    "roles": [xxx, yyy],
+                    "networks": {
+                        "eth0": {
+                            "ip": "192.168.1.1",
+                            "netmask": "255.255.255.0",
+                            "is_mgmt": True,
+                            "is_promiscuous": False,
+                            "subnet": "192.168.1.0/24"
+                        },
+                        "eth1": {...}
+                    },
+                    "os_config": {},
+                    "package_config": {},
+                    "deployed_os_config": {},
+                    "deployed_package_config": {}
+                },
+                2: {...},
+                ....
+            }
+        }
+ """
+
+ hosts_info = {}
+ for host_id in hosts_id_list:
+ info = cluster_db.get_cluster_host(cluster_id, host_id, user=user)
+ logging.debug("checking on info %r %r" % (host_id, info))
+
+ info[const.ROLES] = ActionHelper._get_role_names(info[const.ROLES])
+
+ # TODO(grace): Is following line necessary??
+ info.setdefault(const.ROLES, [])
+
+ config = cluster_db.get_cluster_host_config(cluster_id,
+ host_id,
+ user=user)
+ info.update(config)
+
+ networks = info[const.NETWORKS]
+ networks_dict = {}
+ # Convert networks from list to dictionary format
+ for entry in networks:
+ nic_info = {}
+ nic_info = {
+ entry[const.NIC]: {
+ const.IP_ADDR: entry[const.IP_ADDR],
+ const.NETMASK: entry[const.NETMASK],
+ const.MGMT_NIC_FLAG: entry[const.MGMT_NIC_FLAG],
+ const.PROMISCUOUS_FLAG: entry[const.PROMISCUOUS_FLAG],
+ const.SUBNET: entry[const.SUBNET]
+ }
+ }
+ networks_dict.update(nic_info)
+
+ info[const.NETWORKS] = networks_dict
+
+ hosts_info[host_id] = info
+
+ return hosts_info
+
+ @staticmethod
+ def save_deployed_config(deployed_config, user):
+ """Save deployed config."""
+ cluster_config = deployed_config[const.CLUSTER]
+ cluster_id = cluster_config[const.ID]
+ del cluster_config[const.ID]
+
+ cluster_db.update_cluster_deployed_config(cluster_id, user=user,
+ **cluster_config)
+
+ hosts_id_list = deployed_config[const.HOSTS].keys()
+ for host_id in hosts_id_list:
+ config = deployed_config[const.HOSTS][host_id]
+ cluster_db.update_cluster_host_deployed_config(cluster_id,
+ host_id,
+ user=user,
+ **config)
+
+ @staticmethod
+ def update_state(
+ cluster_id, host_id_list, user, **kwargs
+ ):
+ # update all clusterhosts state
+ for host_id in host_id_list:
+ cluster_db.update_cluster_host_state(
+ cluster_id,
+ host_id,
+ user=user,
+ **kwargs
+ )
+
+ # update cluster state
+ cluster_db.update_cluster_state(
+ cluster_id,
+ user=user,
+ **kwargs
+ )
+
+ @staticmethod
+ def delete_cluster(
+ cluster_id, host_id_list, user, delete_underlying_host=False
+ ):
+ """Delete cluster.
+
+ If delete_underlying_host is set, underlying hosts will also
+ be deleted.
+ """
+ if delete_underlying_host:
+ for host_id in host_id_list:
+ host_db.del_host(
+ host_id, True, True, user=user
+ )
+ cluster_db.del_cluster(
+ cluster_id, True, True, user=user
+ )
+
+ @staticmethod
+ def delete_cluster_host(
+ cluster_id, host_id, user, delete_underlying_host=False
+ ):
+ """Delete clusterhost.
+
+ If delete_underlying_host set, also delete underlying host.
+ """
+ if delete_underlying_host:
+ host_db.del_host(
+ host_id, True, True, user=user
+ )
+ cluster_db.del_cluster_host(
+ cluster_id, host_id, True, True, user=user
+ )
+
+ @staticmethod
+ def delete_host(host_id, user):
+ host_db.del_host(
+ host_id, True, True, user=user
+ )
+
+ @staticmethod
+ def host_ready(host_id, from_database_only, user):
+ """Trigger host ready."""
+ host_db.update_host_state_internal(
+ host_id, from_database_only=from_database_only,
+ user=user, ready=True
+ )
+
+ @staticmethod
+ def cluster_host_ready(
+ cluster_id, host_id, from_database_only, user
+ ):
+ """Trigger clusterhost ready."""
+ cluster_db.update_cluster_host_state_internal(
+ cluster_id, host_id, from_database_only=from_database_only,
+ user=user, ready=True
+ )
+
+ @staticmethod
+ def is_cluster_os_ready(cluster_id, user=None):
+ return cluster_db.is_cluster_os_ready(cluster_id, user=user)
+
+ @staticmethod
+ def cluster_ready(cluster_id, from_database_only, user):
+ """Trigger cluster ready."""
+ cluster_db.update_cluster_state_internal(
+ cluster_id, from_database_only=from_database_only,
+ user=user, ready=True
+ )
+
+ @staticmethod
+ def get_machine_IPMI(machine_id, user):
+ machine_info = machine_db.get_machine(machine_id, user=user)
+ return machine_info[const.IPMI_CREDS]
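
The lock() context manager above is the serialization guard used throughout these actions. Note that contention or a Redis failure yields None instead of raising, so callers must check the yielded value, as every call site in this commit does. A minimal usage sketch, assuming a Redis server reachable with default settings:

    from compass.actions import util

    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock')
        # critical section: only one such action runs at a time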
diff --git a/compass-deck/api/__init__.py b/compass-deck/api/__init__.py
new file mode 100644
index 0000000..784fe23
--- /dev/null
+++ b/compass-deck/api/__init__.py
@@ -0,0 +1,42 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+from flask import Blueprint
+from flask.ext.login import LoginManager
+from flask import Flask
+
+# from compass.api.v1.api import v1_app
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+app = Flask(__name__)
+app.debug = True
+# blueprint = Blueprint('v2_app', __name__)
+# app.register_blueprint(v1_app, url_prefix='/v1.0')
+# app.register_blueprint(blueprint, url_prefix='/api')
+
+
+app.config['SECRET_KEY'] = 'abcd'
+app.config['AUTH_HEADER_NAME'] = setting.USER_AUTH_HEADER_NAME
+app.config['REMEMBER_COOKIE_DURATION'] = (
+ datetime.timedelta(
+ seconds=util.parse_time_interval(setting.USER_TOKEN_DURATION)
+ )
+)
+
+login_manager = LoginManager()
+login_manager.login_view = 'login'
+login_manager.init_app(app)
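
The app object assembled here is what the wsgi entry point serves (see compass-deck/bin/compass_wsgi.py in the diffstat above). A development-only sketch of running it standalone; the host and port are invented:

    from compass.api import app

    if __name__ == '__main__':
        app.run(host='0.0.0.0', port=8080)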
diff --git a/compass-deck/api/api. b/compass-deck/api/api.
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/api/api.
diff --git a/compass-deck/api/api.py b/compass-deck/api/api.py
new file mode 100644
index 0000000..e1cdd39
--- /dev/null
+++ b/compass-deck/api/api.py
@@ -0,0 +1,3391 @@
+#!/usr/bin/python
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define all the RestfulAPI entry points."""
+
+import datetime
+import functools
+import logging
+import netaddr
+import requests
+import simplejson as json
+
+from flask.ext.login import current_user
+from flask.ext.login import login_required
+from flask.ext.login import login_user
+from flask.ext.login import logout_user
+from flask import request
+
+from compass.api import app
+from compass.api import auth_handler
+from compass.api import exception_handler
+from compass.api import utils
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import cluster as cluster_api
+from compass.db.api import database
+from compass.db.api import health_check_report as health_report_api
+from compass.db.api import host as host_api
+from compass.db.api import machine as machine_api
+from compass.db.api import metadata_holder as metadata_api
+from compass.db.api import network as network_api
+from compass.db.api import permission as permission_api
+from compass.db.api import switch as switch_api
+from compass.db.api import user as user_api
+from compass.db.api import user_log as user_log_api
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+def log_user_action(func):
+ """decorator used to log api request url."""
+ @functools.wraps(func)
+ def decorated_api(*args, **kwargs):
+ # TODO(xicheng): save request args for GET
+ # and request data for POST/PUT.
+ user_log_api.log_user_action(current_user.id, request.path)
+ return func(*args, **kwargs)
+ return decorated_api
+
+
+def update_user_token(func):
+ """decorator used to update user token expire time after api request."""
+ @functools.wraps(func)
+ def decorated_api(*args, **kwargs):
+ response = func(*args, **kwargs)
+ expire_timestamp = (
+ datetime.datetime.now() + app.config['REMEMBER_COOKIE_DURATION']
+ )
+ user_api.record_user_token(
+ current_user.token, expire_timestamp, user=current_user
+ )
+ return response
+ return decorated_api
+
+
+def _clean_data(data, keys):
+ """remove keys from dict."""
+ for key in keys:
+ if key in data:
+ del data[key]
+
+
+def _replace_data(data, key_mapping):
+ """replace key names in dict."""
+ for key, replaced_key in key_mapping.items():
+ if key in data:
+ data[replaced_key] = data[key]
+ del data[key]
+
+
+def _get_data(data, key):
+ """get key's value from request arg dict.
+
+    When the value is a list, return the element in the list
+ if the list size is one. If the list size is greater than one,
+ raise exception_handler.BadRequest.
+
+ Example: data = {'a': ['b'], 'b': 5, 'c': ['d', 'e'], 'd': []}
+ _get_data(data, 'a') == 'b'
+ _get_data(data, 'b') == 5
+ _get_data(data, 'c') raises exception_handler.BadRequest
+ _get_data(data, 'd') == None
+ _get_data(data, 'e') == None
+
+ Usage: Used to parse the key-value pair in request.args to expected types.
+        Depending on the flask plugins in use and what kind of parameters
+        are passed in, the request.args format may be either
+        {'a': 'b'} or {'a': ['b']}. _get_data forcibly translates
+        request.args to the format {'a': 'b'}. It raises an exception
+        when a parameter is declared multiple times.
+ """
+ if key in data:
+ if isinstance(data[key], list):
+ if data[key]:
+ if len(data[key]) == 1:
+ return data[key][0]
+ else:
+ raise exception_handler.BadRequest(
+                        '%s declared multiple times %s in request' % (
+ key, data[key]
+ )
+ )
+ else:
+ return None
+ else:
+ return data[key]
+ else:
+ return None
+
+
+def _get_data_list(data, key):
+ """get key's value as list from request arg dict.
+
+    If the value type is list, return it; otherwise return a list
+    whose only element is the value from the dict.
+
+ Example: data = {'a': ['b'], 'b': 5, 'c': ['d', 'e'], 'd': []}
+ _get_data_list(data, 'a') == ['b']
+ _get_data_list(data, 'b') == [5]
+ _get_data_list(data, 'd') == []
+ _get_data_list(data, 'e') == []
+
+ Usage: Used to parse the key-value pair in request.args to expected types.
+        Depending on the flask plugins in use and what kind of parameters
+        are passed in, the request.args format may be either
+        {'a': 'b'} or {'a': ['b']}. _get_data_list forcibly translates
+        request.args to the format {'a': ['b']}. It accepts the case
+        that a parameter is declared multiple times.
+ """
+ if key in data:
+ if isinstance(data[key], list):
+ return data[key]
+ else:
+ return [data[key]]
+ else:
+ return []
+
+
+def _get_request_data():
+ """Convert reqeust data from string to python dict.
+
+ If the request data is not json formatted, raises
+ exception_handler.BadRequest.
+ If the request data is not json formatted dict, raises
+ exception_handler.BadRequest
+ If the request data is empty, return default as empty dict.
+
+ Usage: It is used to add or update a single resource.
+ """
+ if request.data:
+ try:
+ data = json.loads(request.data)
+ except Exception:
+ raise exception_handler.BadRequest(
+ 'request data is not json formatted: %s' % request.data
+ )
+ if not isinstance(data, dict):
+ raise exception_handler.BadRequest(
+ 'request data is not json formatted dict: %s' % request.data
+ )
+ return data
+ else:
+ return {}
+
+
+def _get_request_data_as_list():
+ """Convert reqeust data from string to python list.
+
+ If the request data is not json formatted, raises
+ exception_handler.BadRequest.
+ If the request data is not json formatted list, raises
+ exception_handler.BadRequest.
+ If the request data is empty, return default as empty list.
+
+ Usage: It is used to batch add or update a list of resources.
+ """
+ if request.data:
+ try:
+ data = json.loads(request.data)
+ except Exception:
+ raise exception_handler.BadRequest(
+ 'request data is not json formatted: %s' % request.data
+ )
+ if not isinstance(data, list):
+ raise exception_handler.BadRequest(
+ 'request data is not json formatted list: %s' % request.data
+ )
+ return data
+ else:
+ return []
+
+
+def _bool_converter(value):
+ """Convert string value to bool.
+
+    This function is used to convert value in request args to expected type.
+ If the key exists in request args but the value is not set, it means the
+ value should be true.
+
+ Examples:
+        /<request_path>?is_admin parsed to {'is_admin': None} and it should
+ be converted to {'is_admin': True}.
+ /<request_path>?is_admin=0 parsed and converted to {'is_admin': False}.
+ /<request_path>?is_admin=1 parsed and converted to {'is_admin': True}.
+ """
+ if not value:
+ return True
+ if value in ['False', 'false', '0']:
+ return False
+ if value in ['True', 'true', '1']:
+ return True
+ raise exception_handler.BadRequest(
+ '%r type is not bool' % value
+ )
+
+
+def _int_converter(value):
+ """Convert string value to int.
+
+ We do not use the default int converter exception since we want to
+ control the exact http response code.
+
+ Raises: exception_handler.BadRequest if value cannot be parsed to int.
+
+ Examples:
+ /<request_path>?count=10 parsed to {'count': '10'} and it should be
+ converted to {'count': 10}.
+ """
+ try:
+ return int(value)
+ except Exception:
+ raise exception_handler.BadRequest(
+ '%r type is not int' % value
+ )
+
+
+def _get_request_args(**kwargs):
+ """Get request args as dict.
+
+ The value in the dict is converted to expected type.
+
+ Args:
+ kwargs: for each key, the value is the type converter.
+ """
+ args = dict(request.args)
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'origin request args: %s', args
+ )
+ for key, value in args.items():
+ if key in kwargs:
+ converter = kwargs[key]
+ if isinstance(value, list):
+ args[key] = [converter(item) for item in value]
+ else:
+ args[key] = converter(value)
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'request args: %s', args
+ )
+ return args
+
+
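+# Illustrative only: for a request like /machines-hosts?os_id=3&os_id=5,
+# assuming flask parses request.args into {'os_id': ['3', '5']},
+# _get_request_args(os_id=_int_converter) would return {'os_id': [3, 5]}.
+
+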
+def _group_data_action(data, **data_callbacks):
+ """Group api actions and pass data to grouped action callback.
+
+ Example:
+ data = {
+ 'add_hosts': [{'name': 'a'}, {'name': 'b'}],
+ 'update_hosts': {'c': {'mac': '123'}},
+ 'remove_hosts': ['d', 'e']
+ }
+ data_callbacks = {
+ 'add_hosts': update_cluster_action,
+ 'update_hosts': update_cluster_action,
+ 'remove_hosts': update_cluster_action
+ }
+ it converts to update_cluster_action(
+ add_hosts=[{'name': 'a'}, {'name': 'b'}],
+ update_hosts={'c': {'mac': '123'}},
+ remove_hosts=['d', 'e']
+ )
+
+ Raises:
+ exception_handler.BadRequest if data is empty.
+ exception_handler.BadMethod if data contains keys that are
+ not in data_callbacks.
+ exception_handler.BadRequest if the data groups into multiple
+ distinct callbacks.
+ """
+ if not data:
+ raise exception_handler.BadRequest(
+ 'no action to take'
+ )
+ unsupported_keys = list(set(data) - set(data_callbacks))
+ if unsupported_keys:
+ raise exception_handler.BadMethod(
+ 'unsupported actions: %s' % unsupported_keys
+ )
+ callback_datas = {}
+ for data_key, data_value in data.items():
+ callback = data_callbacks[data_key]
+ callback_datas.setdefault(id(callback), {})[data_key] = data_value
+ if len(callback_datas) > 1:
+ raise exception_handler.BadRequest(
+ 'multi actions are not supported'
+ )
+ callback_ids = {}
+ for data_key, data_callback in data_callbacks.items():
+ callback_ids[id(data_callback)] = data_callback
+ for callback_id, callback_data in callback_datas.items():
+ return callback_ids[callback_id](**callback_data)
+
+
+def _wrap_response(func, response_code):
+ """wrap function response to json formatted http response."""
+ def wrapped_func(*args, **kwargs):
+ return utils.make_json_response(
+ response_code,
+ func(*args, **kwargs)
+ )
+ return wrapped_func
+
+
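+# Usage sketch, mirroring the action endpoints below: wrap a db api call
+# so its return value becomes a json response with a fixed status code.
+#     update_func = _wrap_response(
+#         functools.partial(user_api.update_permissions, user_id),
+#         200
+#     )
+#     update_func(add_permissions=[...])  # json response with http 200
+
+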
+def _reformat_host_networks(networks):
+ """Reformat networks from list to dict.
+
+ The key in the dict is the value of the key 'interface'
+ in each network.
+
+ Example: networks = [{'interface': 'eth0', 'ip': '10.1.1.1'}]
+ is reformatted to {
+ 'eth0': {'interface': 'eth0', 'ip': '10.1.1.1'}
+ }
+
+ Usage: The networks from the db api come as a list of networks.
+ For better parsing in the json frontend, we convert the
+ format into a dict for easy reference.
+ """
+ network_mapping = {}
+ for network in networks:
+ if 'interface' in network:
+ network_mapping[network['interface']] = network
+ return network_mapping
+
+
+def _reformat_host(host):
+ """Reformat host's networks."""
+ if isinstance(host, list):
+ return [_reformat_host(item) for item in host]
+ if 'networks' in host:
+ host['networks'] = _reformat_host_networks(host['networks'])
+ return host
+
+
+def _login(use_cookie):
+ """User login helper function.
+
+ The request data should contain at least 'email' and 'password'.
+ The cookie expiration duration is defined in flask app config.
+ If user is not authenticated, it raises Unauthorized exception.
+ """
+ data = _get_request_data()
+ if 'email' not in data or 'password' not in data:
+ raise exception_handler.BadRequest(
+ 'missing email or password in data'
+ )
+ expire_timestamp = (
+ datetime.datetime.now() + app.config['REMEMBER_COOKIE_DURATION']
+ )
+ data['expire_timestamp'] = expire_timestamp
+ user = auth_handler.authenticate_user(**data)
+ if not user.active:
+ raise exception_handler.UserDisabled(
+ '%s is not activated' % user.email
+ )
+ if not login_user(user, remember=data.get('remember', False)):
+ raise exception_handler.UserDisabled('failed to login: %s' % user)
+
+ user_log_api.log_user_action(user.id, request.path)
+ response_data = user_api.record_user_token(
+ user.token, user.expire_timestamp, user=user
+ )
+ return utils.make_json_response(200, response_data)
+
+
+@app.route('/users/token', methods=['POST'])
+def get_token():
+ """user login and return token."""
+ return _login(False)
+
+
+@app.route("/users/login", methods=['POST'])
+def login():
+ """User login."""
+ return _login(True)
+
+
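+# Illustrative login request (credentials are hypothetical):
+#     POST /users/login
+#     {"email": "admin@example.com", "password": "secret", "remember": false}
+# On success the response body carries the token record returned by
+# user_api.record_user_token.
+
+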
+@app.route("/users/register", methods=['POST'])
+def register():
+ """register new user."""
+ data = _get_request_data()
+ data['is_admin'] = False
+ data['active'] = False
+ return utils.make_json_response(
+ 200, user_api.add_user(**data)
+ )
+
+
+@app.route('/users/logout', methods=['POST'])
+@login_required
+def logout():
+ """User logout."""
+ user_log_api.log_user_action(current_user.id, request.path)
+ response_data = user_api.clean_user_token(
+ current_user.token, user=current_user
+ )
+ logout_user()
+ return utils.make_json_response(200, response_data)
+
+
+@app.route("/users", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_users():
+ """list users.
+
+ Supported parameters: ['email', 'is_admin', 'active']
+ """
+ data = _get_request_args(
+ is_admin=_bool_converter,
+ active=_bool_converter
+ )
+ return utils.make_json_response(
+ 200, user_api.list_users(user=current_user, **data)
+ )
+
+
+@app.route("/users", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_user():
+ """add user.
+
+ Must parameters: ['email', 'password'],
+ Optional parameters: ['is_admin', 'active']
+ """
+ data = _get_request_data()
+ user_dict = user_api.add_user(user=current_user, **data)
+ return utils.make_json_response(
+ 200, user_dict
+ )
+
+
+@app.route("/users/<int:user_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_user(user_id):
+ """Get user by id."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200, user_api.get_user(user_id, user=current_user, **data)
+ )
+
+
+@app.route("/current-user", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_current_user():
+ """Get current user."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200, user_api.get_current_user(user=current_user, **data)
+ )
+
+
+@app.route("/users/<int:user_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_user(user_id):
+ """Update user.
+
+ Supported parameters by self: [
+ 'email', 'firstname', 'lastname', 'password'
+ ]
+ Supported parameters by admin: ['is_admin', 'active']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_api.update_user(
+ user_id,
+ user=current_user,
+ **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_user(user_id):
+ """Delete user.
+
+ Delete is only permitted by admin user.
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_api.del_user(
+ user_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>/permissions", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_user_permissions(user_id):
+ """Get user permissions."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200, user_api.get_permissions(user_id, user=current_user, **data)
+ )
+
+
+@app.route("/users/<int:user_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_user_action(user_id):
+ """Take user action.
+
+ Supported actions: [
+ 'add_permissions', 'remove_permissions',
+ 'set_permissions', 'enable_user',
+ 'disable_user'
+ ]
+ """
+ data = _get_request_data()
+ update_permissions_func = _wrap_response(
+ functools.partial(
+ user_api.update_permissions, user_id, user=current_user,
+ ),
+ 200
+ )
+
+ def disable_user(disable_user=None):
+ return user_api.update_user(
+ user_id, user=current_user, active=False
+ )
+
+ disable_user_func = _wrap_response(
+ disable_user,
+ 200
+ )
+
+ def enable_user(enable_user=None):
+ return user_api.update_user(
+ user_id, user=current_user, active=True
+ )
+
+ enable_user_func = _wrap_response(
+ enable_user,
+ 200
+ )
+ return _group_data_action(
+ data,
+ add_permissions=update_permissions_func,
+ remove_permissions=update_permissions_func,
+ set_permissions=update_permissions_func,
+ enable_user=enable_user_func,
+ disable_user=disable_user_func
+ )
+
+
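+# Illustrative request body for the action endpoint above; the payload
+# shape is hypothetical, and only one action group may be supplied per
+# request since _group_data_action rejects multi-callback requests:
+#     POST /users/3/action
+#     {"add_permissions": [...]}
+
+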
+@app.route(
+ '/users/<int:user_id>/permissions/<int:permission_id>',
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_user_permission(user_id, permission_id):
+ """Get a specific user permission."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ user_api.get_permission(
+ user_id, permission_id, user=current_user,
+ **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>/permissions", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_user_permission(user_id):
+ """Add permission to a specific user.
+
+ add_user_permission is only permitted by admin user.
+ Must parameters: ['permission_id']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_api.add_permission(
+ user_id, user=current_user,
+ **data
+ )
+ )
+
+
+@app.route(
+ '/users/<int:user_id>/permissions/<permission_id>',
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_user_permission(user_id, permission_id):
+ """Delete a specific user permission."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_api.del_permission(
+ user_id, permission_id, user=current_user,
+ **data
+ )
+ )
+
+
+@app.route("/permissions", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_permissions():
+ """List permissions.
+
+ Supported filters: ['id', 'name', 'alias', 'description']
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ permission_api.list_permissions(user=current_user, **data)
+ )
+
+
+@app.route("/permissions/<int:permission_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_permission(permission_id):
+ """Get permission."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ permission_api.get_permission(permission_id, user=current_user, **data)
+ )
+
+
+def _filter_timestamp(data):
+ """parse timestamp related params to db api understandable params.
+
+ Example:
+ {'timestamp_start': '2005-12-23 12:00:00'} to
+ {'timestamp': {'ge': timestamp('2005-12-23 12:00:00')}},
+ {'timestamp_end': '2005-12-23 12:00:00'} to
+ {'timestamp': {'le': timestamp('2005-12-23 12:00:00')}},
+ {'timestamp_range': '2005-12-23 12:00:00,2005-12-24 12:00:00'} to
+ {'timestamp': {'between': [
+ timestamp('2005-12-23 12:00:00'),
+ timestamp('2005-12-24 12:00:00')
+ ]
+ }}
+
+ The timestamp related params can be declared multiple times.
+ """
+ timestamp_filter = {}
+ start = _get_data(data, 'timestamp_start')
+ if start is not None:
+ timestamp_filter['ge'] = util.parse_datetime(
+ start, exception_handler.BadRequest
+ )
+ end = _get_data(data, 'timestamp_end')
+ if end is not None:
+ timestamp_filter['le'] = util.parse_datetime(
+ end, exception_handler.BadRequest)
+ time_ranges = _get_data_list(data, 'timestamp_range')
+ if time_ranges:
+ timestamp_filter['between'] = []
+ for value in time_ranges:
+ timestamp_filter['between'].append(
+ util.parse_datetime_range(
+ value, exception_handler.BadRequest
+ )
+ )
+ data['timestamp'] = timestamp_filter
+ _clean_data(
+ data,
+ [
+ 'timestamp_start', 'timestamp_end',
+ 'timestamp_range'
+ ]
+ )
+
+
+@app.route("/users/logs", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_all_user_actions():
+ """List all users actions.
+
+ Supported filters: [
+ 'timestamp_start', 'timestamp_end', 'timestamp_range',
+ 'user_email'
+ ]
+ """
+ data = _get_request_args()
+ _filter_timestamp(data)
+ return utils.make_json_response(
+ 200,
+ user_log_api.list_actions(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>/logs", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_user_actions(user_id):
+ """List user actions for specific user.
+
+ Supported filters: [
+ 'timestamp_start', 'timestamp_end', 'timestamp_range',
+ ]
+ """
+ data = _get_request_args()
+ _filter_timestamp(data)
+ return utils.make_json_response(
+ 200,
+ user_log_api.list_user_actions(
+ user_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/users/logs", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_all_user_actions():
+ """Delete all user actions."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_log_api.del_actions(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/users/<int:user_id>/logs", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_user_actions(user_id):
+ """Delete user actions for specific user."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ user_log_api.del_user_actions(
+ user_id, user=current_user, **data
+ )
+ )
+
+
+def _filter_switch_ip(data):
+ """filter switch ip related params to db/api understandable format.
+
+ Examples:
+ {'switchIp': '10.0.0.1'} to {'ip_int': {'eq': int of '10.0.0.1'}}
+ {'switchIpStart': '10.0.0.1'} to
+ {'ip_int': {'ge': int of '10.0.0.1'}}
+ {'switchIpEnd': '10.0.0.1'} to
+ {'ip_int': {'le': int of '10.0.0.1'}}
+ {'switchIpRange': '10.0.0.1,10.0.0.254'} to
+ {'ip_int': {'between': [int of '10.0.0.1', int of '10.0.0.254']}}
+
+ the switch ip related params can be declared multi times.
+ """
+ ip_filter = {}
+ switch_ips = _get_data_list(data, 'switchIp')
+ if switch_ips:
+ ip_filter['eq'] = []
+ for switch_ip in switch_ips:
+ ip_filter['eq'].append(long(netaddr.IPAddress(switch_ip)))
+ switch_start = _get_data(data, 'switchIpStart')
+ if switch_start is not None:
+ ip_filter['ge'] = long(netaddr.IPAddress(switch_start))
+ switch_end = _get_data(data, 'switchIpEnd')
+ if switch_end is not None:
+ ip_filter['lt'] = long(netaddr.IPAddress(switch_end))
+ switch_nets = _get_data_list(data, 'switchIpNetwork')
+ if switch_nets:
+ ip_filter['between'] = []
+ for switch_net in switch_nets:
+ network = netaddr.IPNetwork(switch_net)
+ ip_filter['between'].append((network.first, network.last))
+ switch_ranges = _get_data_list(data, 'switchIpRange')
+ if switch_ranges:
+ ip_filter.setdefault('between', [])
+ for switch_range in switch_ranges:
+ ip_start, ip_end = switch_range.split(',')
+ # append a (start, end) tuple; list.append takes a single argument
+ ip_filter['between'].append(
+ (long(netaddr.IPAddress(ip_start)),
+ long(netaddr.IPAddress(ip_end)))
+ )
+ if ip_filter:
+ data['ip_int'] = ip_filter
+ _clean_data(
+ data,
+ [
+ 'switchIp', 'switchIpStart', 'switchIpEnd',
+ 'switchIpNetwork', 'switchIpRange'
+ ]
+ )
+
+
+@app.route("/switches", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switches():
+ """List switches.
+
+ Supported filters: [
+ 'switchIp', 'switchIpStart', 'switchIpEnd',
+ 'switchIpNetwork', 'switchIpRange', 'vendor', 'state'
+ ]
+ """
+ data = _get_request_args()
+ _filter_switch_ip(data)
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switches(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_switch(switch_id):
+ """Get switch."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200, switch_api.get_switch(switch_id, user=current_user, **data)
+ )
+
+
+@app.route("/switches", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_switch():
+ """add switch.
+
+ Must fields: ['ip']
+ Optional fields: [
+ 'credentials', 'vendor', 'state',
+ 'err_msg', 'filters'
+ ]
+ """
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.add_switch(user=current_user, **data)
+ )
+
+
+@app.route("/switchesbatch", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_switches():
+ """batch add switches.
+
+ request data is a list of dicts. Each dict must contain ['ip'],
+ may contain [
+ 'credentials', 'vendor', 'state', 'err_msg', 'filters'
+ ]
+ """
+ data = _get_request_data_as_list()
+ for item_data in data:
+ _replace_data(item_data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.add_switches(
+ data=data, user=current_user
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_switch(switch_id):
+ """update switch.
+
+ Supported fields: [
+ 'ip', 'credentials', 'vendor', 'state',
+ 'err_msg', 'filters'
+ ]
+ """
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.update_switch(switch_id, user=current_user, **data)
+ )
+
+
+@app.route("/switches/<int:switch_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_switch(switch_id):
+ """patch switch.
+
+ Supported fields: [
+ 'credentials', 'filters'
+ ]
+ """
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.patch_switch(switch_id, user=current_user, **data)
+ )
+
+
+@app.route("/switches/<int:switch_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_switch(switch_id):
+ """delete switch."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.del_switch(switch_id, user=current_user, **data)
+ )
+
+
+@util.deprecated
+@app.route("/switch-filters", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switch_filters():
+ """List switch filters."""
+ data = _get_request_args()
+ _filter_switch_ip(data)
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switch_filters(
+ user=current_user, **data
+ )
+ )
+
+
+@util.deprecated
+@app.route("/switch-filters/<int:switch_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_switch_filters(switch_id):
+ """Get switch filters."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ switch_api.get_switch_filters(switch_id, user=current_user, **data)
+ )
+
+
+@util.deprecated
+@app.route("/switch-filters/<int:switch_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_switch_filters(switch_id):
+ """update switch filters."""
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.update_switch_filters(switch_id, user=current_user, **data)
+ )
+
+
+@util.deprecated
+@app.route("/switch-filters/<int:switch_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_switch_filters(switch_id):
+ """patch switch filters."""
+ data = _get_request_data()
+ _replace_data(data, {'filters': 'machine_filters'})
+ return utils.make_json_response(
+ 200,
+ switch_api.patch_switch_filter(switch_id, user=current_user, **data)
+ )
+
+
+def _filter_switch_port(data):
+ """Generate switch machine filters by switch port related fields.
+
+ Examples:
+ {'port': 'ae20'} to {'port': {'eq': 'ae20'}}
+ {'portStart': 20, 'portPrefix': 'ae', 'portSuffix': ''} to
+ {'port': {'startswith': 'ae', 'endswith': '', 'resp_ge': 20}}
+ {'portEnd': 20, 'portPrefix': 'ae', 'portSuffix': ''} to
+ {'port': {'startswith': 'ae', 'endswith': '', 'resp_lt': 20}}
+ {'portRange': '20,40', 'portPrefix': 'ae', 'portSuffix': ''} to
+ {'port': {
+ 'startswith': 'ae', 'endswith': '', 'resp_range': [(20, 40)]
+ }}
+
+ For each switch machine's port, it extracts the portNumber from
+ '<portPrefix><portNumber><portSuffix>' and filters the returned switch
+ machines by these filters.
+ """
+ port_filter = {}
+ ports = _get_data_list(data, 'port')
+ if ports:
+ port_filter['eq'] = ports
+ port_start = _get_data(data, 'portStart')
+ if port_start is not None:
+ port_filter['resp_ge'] = int(port_start)
+ port_end = _get_data(data, 'portEnd')
+ if port_end is not None:
+ port_filter['resp_lt'] = int(port_end)
+ port_ranges = _get_data_list(data, 'portRange')
+ if port_ranges:
+ port_filter['resp_range'] = []
+ for port_range in port_ranges:
+ port_start, port_end = port_range.split(',')
+ port_filter['resp_range'].append(
+ (int(port_start), int(port_end))
+ )
+ port_prefix = _get_data(data, 'portPrefix')
+ if port_prefix:
+ port_filter['startswith'] = port_prefix
+ port_suffix = _get_data(data, 'portSuffix')
+ if port_suffix:
+ port_filter['endswith'] = port_suffix
+ if port_filter:
+ data['port'] = port_filter
+ _clean_data(
+ data,
+ [
+ 'portStart', 'portEnd', 'portRange',
+ 'portPrefix', 'portSuffix'
+ ]
+ )
+
+
+def _filter_general(data, key):
+ """Generate general filter for db/api returned list.
+
+ Supported filter type: [
+ 'resp_eq', 'resp_in', 'resp_le', 'resp_ge',
+ 'resp_gt', 'resp_lt', 'resp_match'
+ ]
+ """
+ general_filter = {}
+ general = _get_data_list(data, key)
+ if general:
+ general_filter['resp_in'] = general
+ data[key] = general_filter
+
+
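+# For illustration: _filter_general(data, 'vlans') with
+# data == {'vlans': [1, 2]} rewrites it in place to
+# data == {'vlans': {'resp_in': [1, 2]}}, so the db/api layer keeps
+# only items whose 'vlans' response value matches that list.
+
+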
+def _filter_machine_tag(data):
+ """Generate filter for machine tag.
+
+ Examples:
+ original returns:
+ [{'tag': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b3',
+ 'stack': '20'
+ }},{'tag': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b2',
+ 'stack': '20'
+ }}]
+ filter: {'tag': 'room=205;rack=a2b3'}
+ filtered: [{'tag': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b3',
+ 'stack': '20'
+ }}]
+ """
+ tag_filter = {}
+ tags = _get_data_list(data, 'tag')
+ if tags:
+ tag_filter['resp_in'] = []
+ for tag in tags:
+ tag_filter['resp_in'].append(
+ util.parse_request_arg_dict(tag)
+ )
+ data['tag'] = tag_filter
+
+
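+# For illustration: a query like ?tag=room=205;rack=a2b3 is parsed by
+# util.parse_request_arg_dict into {'room': '205', 'rack': 'a2b3'} and
+# collected under 'resp_in', as the docstring example above shows.
+
+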
+def _filter_machine_location(data):
+ """Generate filter for machine location.
+
+ Examples:
+ original returns:
+ [{'location': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b3',
+ 'stack': '20'
+ }},{'location': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b2',
+ 'stack': '20'
+ }}]
+ filter: {'location': 'room=205;rack=a2b3'}
+ filtered: [{'location': {
+ 'city': 'beijing',
+ 'building': 'tsinghua main building',
+ 'room': '205', 'rack': 'a2b3',
+ 'stack': '20'
+ }}]
+ """
+ location_filter = {}
+ locations = _get_data_list(data, 'location')
+ if locations:
+ location_filter['resp_in'] = []
+ for location in locations:
+ location_filter['resp_in'].append(
+ util.parse_request_arg_dict(location)
+ )
+ data['location'] = location_filter
+
+
+@app.route("/switches/<int:switch_id>/machines", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switch_machines(switch_id):
+ """Get switch machines.
+
+ Supported filters: [
+ 'port', 'portStart', 'portEnd', 'portRange',
+ 'portPrefix', 'portSuffix', 'vlans', 'tag', 'location'
+ ]
+ """
+ data = _get_request_args(vlans=_int_converter)
+ _filter_switch_port(data)
+ _filter_general(data, 'vlans')
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switch_machines(
+ switch_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>/machines-hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switch_machines_hosts(switch_id):
+ """Get switch machines or hosts.
+
+ Supported filters: [
+ 'port', 'portStart', 'portEnd', 'portRange',
+ 'portPrefix', 'portSuffix', 'vlans', 'tag', 'location',
+ 'os_name', 'os_id'
+ ]
+
+ """
+ data = _get_request_args(vlans=_int_converter, os_id=_int_converter)
+ _filter_switch_port(data)
+ _filter_general(data, 'vlans')
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ _filter_general(data, 'os_name')
+ # TODO(xicheng): os_id filter should be removed later
+ _filter_general(data, 'os_id')
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switch_machines_hosts(
+ switch_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>/machines", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_switch_machine(switch_id):
+ """add switch machine.
+
+ Must fields: ['mac', 'port']
+ Optional fields: ['vlans', 'ipmi_credentials', 'tag', 'location']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.add_switch_machine(switch_id, user=current_user, **data)
+ )
+
+
+@app.route("/switches/machines", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_switch_machines():
+ """batch add switch machines.
+
+ request data is a list of dicts containing switch machine fields.
+ Each dict must contain ['switch_ip', 'mac', 'port'],
+ may contain ['vlans', 'ipmi_credentials', 'tag', 'location'].
+ """
+ data = _get_request_data_as_list()
+ return utils.make_json_response(
+ 200, switch_api.add_switch_machines(
+ data=data, user=current_user
+ )
+ )
+
+
+@app.route(
+ '/switches/<int:switch_id>/machines/<int:machine_id>',
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_switch_machine(switch_id, machine_id):
+ """get switch machine."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ switch_api.get_switch_machine(
+ switch_id, machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/switches/<int:switch_id>/machines/<int:machine_id>',
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_switch_machine(switch_id, machine_id):
+ """update switch machine.
+
+ Supported fields: [
+ 'port', 'vlans', 'ipmi_credentials', 'tag', 'location'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.update_switch_machine(
+ switch_id, machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/switches/<int:switch_id>/machines/<int:machine_id>',
+ methods=['PATCH']
+)
+@log_user_action
+@login_required
+@update_user_token
+def patch_switch_machine(switch_id, machine_id):
+ """patch switch machine.
+
+ Supported fields: [
+ 'vlans', 'ipmi_credentials', 'tag', 'location'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.patch_switch_machine(
+ current_user, switch_id, machine_id, **data
+ )
+ )
+
+
+@app.route(
+ '/switches/<int:switch_id>/machines/<int:machine_id>',
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_switch_machine(switch_id, machine_id):
+ """Delete switch machine."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.del_switch_machine(
+ switch_id, machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches/<int:switch_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_switch_action(switch_id):
+ """take switch action.
+
+ Supported actions: [
+ 'find_machines', 'add_machines', 'remove_machines',
+ 'set_machines'
+ ]
+ """
+ data = _get_request_data()
+ poll_switch_func = _wrap_response(
+ functools.partial(
+ switch_api.poll_switch, switch_id, user=current_user,
+ ),
+ 202
+ )
+ update_switch_machines_func = _wrap_response(
+ functools.partial(
+ switch_api.update_switch_machines, switch_id, user=current_user,
+ ),
+ 200
+ )
+ return _group_data_action(
+ data,
+ find_machines=poll_switch_func,
+ add_machines=update_switch_machines_func,
+ remove_machines=update_switch_machines_func,
+ set_machines=update_switch_machines_func
+ )
+
+
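+# Illustrative switch action requests (payload shapes are hypothetical):
+#     POST /switches/1/action with {"find_machines": null}
+#         -> polls the switch asynchronously, answered with http 202
+#     POST /switches/1/action with {"add_machines": [...]}
+#         -> updates switch machines synchronously, answered with http 200
+
+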
+@app.route("/machines/<int:machine_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_machine_action(machine_id):
+ """take machine action.
+
+ Supported actions: ['tag', 'poweron', 'poweroff', 'reset']
+ """
+ data = _get_request_data()
+ tag_func = _wrap_response(
+ functools.partial(
+ machine_api.update_machine, machine_id, user=current_user,
+ ),
+ 200
+ )
+ poweron_func = _wrap_response(
+ functools.partial(
+ machine_api.poweron_machine, machine_id, user=current_user,
+ ),
+ 202
+ )
+ poweroff_func = _wrap_response(
+ functools.partial(
+ machine_api.poweroff_machine, machine_id, user=current_user,
+ ),
+ 202
+ )
+ reset_func = _wrap_response(
+ functools.partial(
+ machine_api.reset_machine, machine_id, user=current_user,
+ ),
+ 202
+ )
+ return _group_data_action(
+ data,
+ tag=tag_func,
+ poweron=poweron_func,
+ poweroff=poweroff_func,
+ reset=reset_func
+ )
+
+
+@app.route("/switch-machines", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switchmachines():
+ """List switch machines.
+
+ Supported filters: [
+ 'vlans', 'switchIp', 'switchIpStart',
+ 'switchIpEnd', 'switchIpRange', 'port',
+ 'portStart', 'portEnd', 'portRange',
+ 'location', 'tag', 'mac'
+ ]
+ """
+ data = _get_request_args(vlans=_int_converter)
+ _filter_switch_ip(data)
+ _filter_switch_port(data)
+ _filter_general(data, 'vlans')
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switchmachines(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/switches-machines-hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_switchmachines_hosts():
+ """List switch machines or hosts.
+
+ Supported filters: [
+ 'vlans', 'switchIp', 'switchIpStart',
+ 'switchIpEnd', 'switchIpRange', 'port',
+ 'portStart', 'portEnd', 'portRange',
+ 'location', 'tag', 'mac', 'os_name'
+ ]
+
+ """
+ data = _get_request_args(vlans=_int_converter, os_id=_int_converter)
+ _filter_switch_ip(data)
+ _filter_switch_port(data)
+ _filter_general(data, 'vlans')
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ _filter_general(data, 'os_name')
+ return utils.make_json_response(
+ 200,
+ switch_api.list_switchmachines_hosts(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/switch-machines/<int:switch_machine_id>',
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_switchmachine(switch_machine_id):
+ """get switch machine."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ switch_api.get_switchmachine(
+ switch_machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/switch-machines/<int:switch_machine_id>',
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_switchmachine(switch_machine_id):
+ """update switch machine.
+
+ Support fields: [
+ 'port', 'vlans', 'ipmi_credentials', 'tag', 'location'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.update_switchmachine(
+ switch_machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route('/switch-machines/<int:switch_machine_id>', methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_switchmachine(switch_machine_id):
+ """patch switch machine.
+
+ Support fields: [
+ 'vlans', 'ipmi_credentials', 'tag', 'location'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.patch_switchmachine(
+ switch_machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/switch-machines/<int:switch_machine_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_switchmachine(switch_machine_id):
+ """Delete switch machine."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ switch_api.del_switchmachine(
+ switch_machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/machines", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_machines():
+ """List machines.
+
+ Supported filters: [
+ 'tag', 'location', 'mac'
+ ]
+ """
+ data = _get_request_args()
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ return utils.make_json_response(
+ 200,
+ machine_api.list_machines(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/machine/discovery", methods=['POST'])
+def switch_discovery():
+ """switch on/off hardware discovery"""
+ data = _get_request_args()
+
+
+@app.route("/machines", methods=['POST'])
+def add_machine():
+ """add machine by tinycore.
+
+ Supported fields: [
+ 'tag', 'location', 'ipmi_credentials',
+ 'machine_attributes'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ machine_api.add_machine(**data)
+ )
+
+
+@app.route("/machines/<int:machine_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_machine(machine_id):
+ """Get machine."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ machine_api.get_machine(
+ machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/machines/<int:machine_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_machine(machine_id):
+ """update machine.
+
+ Supported fields: [
+ 'tag', 'location', 'ipmi_credentials',
+ 'machine_attributes'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ machine_api.update_machine(
+ machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/machines/<int:machine_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_machine(machine_id):
+ """patch machine.
+
+ Supported fields: [
+ 'tag', 'location', 'ipmi_credentials',
+ 'machine_attributes'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ machine_api.patch_machine(
+ machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/machines/<int:machine_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_machine(machine_id):
+ """Delete machine."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ machine_api.del_machine(
+ machine_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/subnets", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_subnets():
+ """List subnets.
+
+ Supported filters: [
+ 'subnet', 'name'
+ ]
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ network_api.list_subnets(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/subnets/<int:subnet_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_subnet(subnet_id):
+ """Get subnet."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ network_api.get_subnet(
+ subnet_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/subnets", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_subnet():
+ """add subnet.
+
+ Must fields: ['subnet']
+ Optional fields: ['name']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ network_api.add_subnet(user=current_user, **data)
+ )
+
+
+@app.route("/subnets/<int:subnet_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_subnet(subnet_id):
+ """update subnet.
+
+ Support fields: ['subnet', 'name']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ network_api.update_subnet(
+ subnet_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/subnets/<int:subnet_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_subnet(subnet_id):
+ """Delete subnet."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ network_api.del_subnet(
+ subnet_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/adapters", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_adapters():
+ """List adapters.
+
+ Supported filters: [
+ 'name'
+ ]
+ """
+ data = _get_request_args()
+ _filter_general(data, 'name')
+ return utils.make_json_response(
+ 200,
+ adapter_api.list_adapters(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/adapters/<adapter_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_adapter(adapter_id):
+ """Get adapter."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ adapter_api.get_adapter(
+ adapter_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/adapters/<adapter_id>/metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_adapter_metadata(adapter_id):
+ """Get adapter metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_package_metadata(
+ adapter_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/oses/<os_id>/metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_os_metadata(os_id):
+ """Get os metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_os_metadata(
+ os_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/oses/<os_id>/ui_metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def convert_os_metadata(os_id):
+ """Convert os metadata to ui os metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_os_ui_metadata(
+ os_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/flavors/<flavor_id>/metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_flavor_metadata(flavor_id):
+ """Get flavor metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_flavor_metadata(
+ flavor_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/flavors/<flavor_id>/ui_metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def convert_flavor_metadata(flavor_id):
+ """Convert flavor metadata to ui flavor metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_flavor_ui_metadata(
+ flavor_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/adapters/<adapter_id>/oses/<os_id>/metadata",
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_adapter_os_metadata(adapter_id, os_id):
+ """Get adapter metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ metadata_api.get_package_os_metadata(
+ adapter_id, os_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_clusters():
+ """List clusters.
+
+ Supported filters: [
+ 'name', 'os_name', 'owner', 'adapter_name', 'flavor_name'
+ ]
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.list_clusters(
+ user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster(cluster_id):
+ """Get cluster."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_cluster():
+ """add cluster.
+
+ Must fields: ['name', 'adapter_id', 'os_id']
+ Optional fields: ['flavor_id']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.add_cluster(user=current_user, **data)
+ )
+
+
+@app.route("/clusters/<int:cluster_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster(cluster_id):
+ """update cluster.
+
+ Supported fields: ['name', 'reinstall_distributed_system']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_cluster(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_cluster(cluster_id):
+ """Delete cluster."""
+ data = _get_request_data()
+ response = cluster_api.del_cluster(
+ cluster_id, user=current_user, **data
+ )
+ if 'status' in response:
+ return utils.make_json_response(
+ 202, response
+ )
+ else:
+ return utils.make_json_response(
+ 200, response
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/config", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_config(cluster_id):
+ """Get cluster config."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_config(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/metadata", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_metadata(cluster_id):
+ """Get cluster metadata."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_metadata(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/config", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster_config(cluster_id):
+ """update cluster config.
+
+ Supported fields: ['os_config', 'package_config', 'config_step']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_cluster_config(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/config", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_cluster_config(cluster_id):
+ """patch cluster config.
+
+ Supported fields: ['os_config', 'package_config', 'config_step']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_cluster_config(cluster_id, user=current_user, **data)
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/config", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_cluster_config(cluster_id):
+ """Delete cluster config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.del_cluster_config(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_cluster_action(cluster_id):
+ """take cluster action.
+
+ Supported actions: [
+ 'add_hosts', 'remove_hosts', 'set_hosts',
+ 'review', 'deploy', 'check_health', 'apply_patch'
+ ]
+ """
+ data = _get_request_data()
+ url_root = request.url_root
+
+ update_cluster_hosts_func = _wrap_response(
+ functools.partial(
+ cluster_api.update_cluster_hosts, cluster_id, user=current_user,
+ ),
+ 200
+ )
+ review_cluster_func = _wrap_response(
+ functools.partial(
+ cluster_api.review_cluster, cluster_id, user=current_user,
+ ),
+ 200
+ )
+ deploy_cluster_func = _wrap_response(
+ functools.partial(
+ cluster_api.deploy_cluster, cluster_id, user=current_user,
+ ),
+ 202
+ )
+ redeploy_cluster_func = _wrap_response(
+ functools.partial(
+ cluster_api.redeploy_cluster, cluster_id, user=current_user,
+ ),
+ 202
+ )
+ patch_cluster_func = _wrap_response(
+ functools.partial(
+ cluster_api.patch_cluster, cluster_id, user=current_user,
+ ),
+ 202
+ )
+ check_cluster_health_func = _wrap_response(
+ functools.partial(
+ health_report_api.start_check_cluster_health,
+ cluster_id,
+ '%s/clusters/%s/healthreports' % (url_root, cluster_id),
+ user=current_user
+ ),
+ 202
+ )
+ return _group_data_action(
+ data,
+ add_hosts=update_cluster_hosts_func,
+ set_hosts=update_cluster_hosts_func,
+ remove_hosts=update_cluster_hosts_func,
+ review=review_cluster_func,
+ deploy=deploy_cluster_func,
+ redeploy=redeploy_cluster_func,
+ apply_patch=patch_cluster_func,
+ check_health=check_cluster_health_func
+ )
+
+
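+# Illustrative cluster action requests; 'machine_id' follows the
+# add_cluster_host fields below, the rest of the shapes are hypothetical:
+#     POST /clusters/1/action with {"add_hosts": [{"machine_id": 2}]}
+#         -> updates cluster hosts, answered with http 200
+#     POST /clusters/1/action with {"deploy": {}}
+#         -> starts deployment asynchronously, answered with http 202
+
+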
+@app.route("/clusters/<int:cluster_id>/state", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def get_cluster_state(cluster_id):
+ """Get cluster state."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_state(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports", methods=['POST'])
+def create_health_reports(cluster_id):
+ """Create a health check report.
+
+ Must fields: ['name']
+ Optional fields: [
+ 'display_name', 'report', 'category', 'state', 'error_message'
+ ]
+ """
+ data = _get_request_data()
+ output = []
+ logging.info('create_health_reports for cluster %s: %s',
+ cluster_id, data)
+ if 'report_list' in data:
+ for report in data['report_list']:
+ try:
+ output.append(
+ health_report_api.add_report_record(
+ cluster_id, **report
+ )
+ )
+ except Exception as error:
+ logging.exception(error)
+ continue
+
+ else:
+ output = health_report_api.add_report_record(
+ cluster_id, **data
+ )
+
+ return utils.make_json_response(
+ 200,
+ output
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports", methods=['PUT'])
+def bulk_update_reports(cluster_id):
+ """Bulk update reports.
+
+ request data is a list of health reports.
+ Each health report must contain ['name'],
+ may contain [
+ 'display_name', 'report', 'category', 'state', 'error_message'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ health_report_api.update_multi_reports(
+ cluster_id, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_health_reports(cluster_id):
+ """list health report for a cluster."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ health_report_api.list_health_reports(
+ cluster_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports/<name>", methods=['PUT'])
+def update_health_report(cluster_id, name):
+ """Update cluster health report.
+
+ Supported fields: ['report', 'state', 'error_message']
+ """
+ data = _get_request_data()
+ if 'error_message' not in data:
+ data['error_message'] = ""
+
+ return utils.make_json_response(
+ 200,
+ health_report_api.update_report(
+ cluster_id, name, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/healthreports/<name>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def get_health_report(cluster_id, name):
+ """Get health report by cluster id and name."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ health_report_api.get_health_report(
+ cluster_id, name, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_cluster_hosts(cluster_id):
+ """Get cluster hosts."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(cluster_api.list_cluster_hosts(
+ cluster_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/clusterhosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_clusterhosts():
+ """Get cluster hosts."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(cluster_api.list_clusterhosts(
+ user=current_user, **data
+ ))
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/hosts/<int:host_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_host(cluster_id, host_id):
+ """Get clusterhost."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(cluster_api.get_cluster_host(
+ cluster_id, host_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_clusterhost(clusterhost_id):
+ """Get clusterhost."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(cluster_api.get_clusterhost(
+ clusterhost_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/clusters/<int:cluster_id>/hosts", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_cluster_host(cluster_id):
+ """update cluster hosts.
+
+ Must fields: ['machine_id']
+ Optional fields: ['name', 'reinstall_os', 'roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.add_cluster_host(cluster_id, user=current_user, **data)
+ )
+
+
+@app.route(
+ '/clusters/<int:cluster_id>/hosts/<int:host_id>',
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster_host(cluster_id, host_id):
+ """Update cluster host.
+
+ Supported fields: ['name', 'reinstall_os', 'roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_cluster_host(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/clusterhosts/<int:clusterhost_id>',
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_clusterhost(clusterhost_id):
+ """Update cluster host.
+
+ Supported fields: ['name', 'reinstall_os', 'roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/clusters/<int:cluster_id>/hosts/<int:host_id>',
+ methods=['PATCH']
+)
+@log_user_action
+@login_required
+@update_user_token
+def patch_cluster_host(cluster_id, host_id):
+ """Update cluster host.
+
+ Supported fields: ['roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_cluster_host(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/clusterhosts/<int:clusterhost_id>',
+ methods=['PATCH']
+)
+@log_user_action
+@login_required
+@update_user_token
+def patch_clusterhost(clusterhost_id):
+ """Update cluster host.
+
+ Supported fields: ['roles']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_clusterhost(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ '/clusters/<int:cluster_id>/hosts/<int:host_id>',
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_cluster_host(cluster_id, host_id):
+ """Delete cluster host."""
+ data = _get_request_data()
+ response = cluster_api.del_cluster_host(
+ cluster_id, host_id, user=current_user, **data
+ )
+ if 'status' in response:
+ return utils.make_json_response(
+ 202, response
+ )
+ else:
+ return utils.make_json_response(
+ 200, response
+ )
+
+
+@app.route(
+ '/clusterhosts/<int:clusterhost_id>',
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_clusterhost(clusterhost_id):
+ """Delete cluster host."""
+ data = _get_request_data()
+ response = cluster_api.del_clusterhost(
+ clusterhost_id, user=current_user, **data
+ )
+ if 'status' in response:
+ return utils.make_json_response(
+ 202, response
+ )
+ else:
+ return utils.make_json_response(
+ 200, response
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/config",
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_host_config(cluster_id, host_id):
+ """Get clusterhost config."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_host_config(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>/config", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_clusterhost_config(clusterhost_id):
+ """Get clusterhost config."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_clusterhost_config(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/config",
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster_host_config(cluster_id, host_id):
+ """update clusterhost config.
+
+ Supported fields: ['os_config', 'package_config']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_cluster_host_config(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>/config", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_clusterhost_config(clusterhost_id):
+ """update clusterhost config.
+
+ Supported fields: ['os_config', 'package_config']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_config(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/config",
+ methods=['PATCH']
+)
+@log_user_action
+@login_required
+@update_user_token
+def patch_cluster_host_config(cluster_id, host_id):
+ """patch clusterhost config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_cluster_host_config(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_clusterhost_config(clusterhost_id):
+ """patch clusterhost config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.patch_clusterhost_config(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/config",
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_cluster_host_config(cluster_id, host_id):
+ """Delete clusterhost config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.del_clusterhost_config(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>/config", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_clusterhost_config(clusterhost_id):
+ """Delete clusterhost config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.del_clusterhost_config(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/state",
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_cluster_host_state(cluster_id, host_id):
+ """Get clusterhost state."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_cluster_host_state(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/clusterhosts/<int:clusterhost_id>/state", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_clusterhost_state(clusterhost_id):
+ """Get clusterhost state."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ cluster_api.get_clusterhost_state(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/clusters/<int:cluster_id>/hosts/<int:host_id>/state",
+ methods=['PUT', 'POST']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_cluster_host_state(cluster_id, host_id):
+ """update clusterhost state.
+
+ Supported fields: ['state', 'percentage', 'message', 'severity']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_state(
+ cluster_id, host_id, user=current_user, **data
+ )
+ )
+
+
+@util.deprecated
+@app.route(
+ "/clusters/<clustername>/hosts/<hostname>/state_internal",
+ methods=['PUT', 'POST']
+)
+def update_cluster_host_state_internal(clustername, hostname):
+ """update clusterhost state.
+
+ Supported fields: ['ready']
+ """
+ # TODO(xicheng): it should be merged into update_cluster_host_state.
+ # TODO(xicheng): the api is not login required and no user checking.
+ data = _get_request_data()
+ clusters = cluster_api.list_clusters(name=clustername)
+ if not clusters:
+ raise exception_handler.ItemNotFound(
+ 'no clusters found for clustername %s' % clustername
+ )
+ cluster_id = clusters[0]['id']
+ hosts = host_api.list_hosts(name=hostname)
+ if not hosts:
+ raise exception_handler.ItemNotFound(
+ 'no hosts found for hostname %s' % hostname
+ )
+ host_id = hosts[0]['id']
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_state_internal(
+ cluster_id, host_id, **data
+ )
+ )
+
+
+@app.route(
+ "/clusterhosts/<int:clusterhost_id>/state",
+ methods=['PUT', 'POST']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_clusterhost_state(clusterhost_id):
+ """update clusterhost state.
+
+ Supported fields: ['state', 'percentage', 'message', 'severity']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_state(
+ clusterhost_id, user=current_user, **data
+ )
+ )
+
+
+@util.deprecated
+@app.route(
+ "/clusterhosts/<clusterhost_name>/state_internal",
+ methods=['PUT', 'POST']
+)
+def update_clusterhost_state_internal(clusterhost_name):
+ """update clusterhost state.
+
+ Supported fields: ['ready']
+ """
+ data = _get_request_data()
+ clusterhosts = cluster_api.list_clusterhosts()
+ clusterhost_id = None
+ for clusterhost in clusterhosts:
+ if clusterhost['name'] == clusterhost_name:
+ clusterhost_id = clusterhost['clusterhost_id']
+ break
+ if not clusterhost_id:
+ raise exception_handler.ItemNotFound(
+ 'no clusterhost found for clusterhost_name %s' % (
+ clusterhost_name
+ )
+ )
+ return utils.make_json_response(
+ 200,
+ cluster_api.update_clusterhost_state_internal(
+ clusterhost_id, **data
+ )
+ )
+
+
+@app.route("/hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_hosts():
+ """List hosts.
+
+ Supported fields: ['name', 'os_name', 'owner', 'mac']
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(host_api.list_hosts(
+ user=current_user, **data
+ ))
+ )
+
+
+@app.route("/hosts/<int:host_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_host(host_id):
+ """Get host."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(host_api.get_host(
+ host_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/machines-hosts", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_machines_or_hosts():
+ """Get list of machine of host if the host exists.
+
+ Supported filters: [
+ 'mac', 'tag', 'location', 'os_name', 'os_id'
+ ]
+ """
+ data = _get_request_args(os_id=_int_converter)
+ _filter_machine_tag(data)
+ _filter_machine_location(data)
+ _filter_general(data, 'os_name')
+ _filter_general(data, 'os_id')
+ return utils.make_json_response(
+ 200,
+ _reformat_host(host_api.list_machines_or_hosts(
+ user=current_user, **data
+ ))
+ )
+
+
+@app.route("/machines-hosts/<int:host_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_machine_or_host(host_id):
+ """Get host."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host(host_api.get_machine_or_host(
+ host_id, user=current_user, **data
+ ))
+ )
+
+
+@app.route("/hosts/<int:host_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_host(host_id):
+ """update host.
+
+ Supported fields: ['name', 'reinstall_os']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_host(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_hosts():
+ """update hosts.
+
+    Update a list of hosts; each host is a dict that may contain: [
+ 'name', 'reinstall_os'
+ ]
+ """
+ data = _get_request_data_as_list()
+ return utils.make_json_response(
+ 200,
+ host_api.update_hosts(
+ data, user=current_user,
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_host(host_id):
+ """Delete host."""
+ data = _get_request_data()
+ response = host_api.del_host(
+ host_id, user=current_user, **data
+ )
+ if 'status' in response:
+ return utils.make_json_response(
+ 202, response
+ )
+ else:
+ return utils.make_json_response(
+ 200, response
+ )
+
+
+@app.route("/hosts/<int:host_id>/clusters", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def get_host_clusters(host_id):
+ """Get host clusters."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_host_clusters(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/config", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_host_config(host_id):
+ """Get host config."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_host_config(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/config", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_host_config(host_id):
+ """update host config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_host_config(host_id, user=current_user, **data)
+ )
+
+
+@app.route("/hosts/<int:host_id>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def patch_host_config(host_id):
+ """patch host config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.patch_host_config(host_id, user=current_user, **data)
+ )
+
+
+@app.route("/hosts/<int:host_id>/config", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_host_config(host_id):
+ """Delete host config."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.del_host_config(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/networks", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_host_networks(host_id):
+ """list host networks.
+
+ Supported filters: [
+ 'interface', 'ip', 'is_mgmt', 'is_promiscuous'
+ ]
+ """
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ _reformat_host_networks(
+ host_api.list_host_networks(
+ host_id, user=current_user, **data
+ )
+ )
+ )
+
+
+@app.route("/host/networks", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def list_hostnetworks():
+ """list host networks.
+
+ Supported filters: [
+ 'interface', 'ip', 'is_mgmt', 'is_promiscuous'
+ ]
+ """
+ data = _get_request_args(
+ is_mgmt=_bool_converter,
+ is_promiscuous=_bool_converter
+ )
+ return utils.make_json_response(
+ 200,
+ _reformat_host_networks(
+ host_api.list_hostnetworks(user=current_user, **data)
+ )
+ )
+
+
+@app.route(
+ "/hosts/<int:host_id>/networks/<int:host_network_id>",
+ methods=['GET']
+)
+@log_user_action
+@login_required
+@update_user_token
+def show_host_network(host_id, host_network_id):
+ """Get host network."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_host_network(
+ host_id, host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/host/networks/<int:host_network_id>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_hostnetwork(host_network_id):
+ """Get host network."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_hostnetwork(
+ host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/networks", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def add_host_network(host_id):
+ """add host network.
+
+ Must fields: ['interface', 'ip', 'subnet_id']
+ Optional fields: ['is_mgmt', 'is_promiscuous']
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200, host_api.add_host_network(host_id, user=current_user, **data)
+ )
+
+
+@app.route("/hosts/networks", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_host_networks():
+ """add host networks.
+
+ update a list of host network each may contain [
+ 'interface', 'ip', 'subnet_id', 'is_mgmt', 'is_promiscuous'
+ ]
+ """
+ data = _get_request_data_as_list()
+ return utils.make_json_response(
+        200,
+        host_api.add_host_networks(
+            data=data, user=current_user
+        )
+    )
+
+
+@app.route(
+ "/hosts/<int:host_id>/networks/<int:host_network_id>",
+ methods=['PUT']
+)
+@log_user_action
+@login_required
+@update_user_token
+def update_host_network(host_id, host_network_id):
+ """update host network.
+
+ supported fields: [
+ 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
+ 'is_promiscuous'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_host_network(
+ host_id, host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/host-networks/<int:host_network_id>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def update_hostnetwork(host_network_id):
+ """update host network.
+
+ supported fields: [
+ 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
+ 'is_promiscuous'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_hostnetwork(
+ host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route(
+ "/hosts/<int:host_id>/networks/<int:host_network_id>",
+ methods=['DELETE']
+)
+@log_user_action
+@login_required
+@update_user_token
+def delete_host_network(host_id, host_network_id):
+ """Delete host network."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.del_host_network(
+ host_id, host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/host-networks/<int:host_network_id>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def delete_hostnetwork(host_network_id):
+ """Delete host network."""
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.del_hostnetwork(
+ host_network_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/state", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def show_host_state(host_id):
+ """Get host state."""
+ data = _get_request_args()
+ return utils.make_json_response(
+ 200,
+ host_api.get_host_state(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/state", methods=['PUT', 'POST'])
+@log_user_action
+@login_required
+@update_user_token
+def update_host_state(host_id):
+ """update host state.
+
+ Supported fields: [
+ 'state', 'percentage', 'message', 'severity'
+ ]
+ """
+ data = _get_request_data()
+ return utils.make_json_response(
+ 200,
+ host_api.update_host_state(
+ host_id, user=current_user, **data
+ )
+ )
+
+
+@app.route("/hosts/<hostname>/state_internal", methods=['PUT', 'POST'])
+def update_host_state_internal(hostname):
+ """update host state.
+
+ Supported fields: ['ready']
+ """
+    data = _get_request_data()
+    hosts = host_api.list_hosts(name=hostname)
+ if not hosts:
+ raise exception_handler.ItemNotFound(
+ 'no hosts found for hostname %s' % hostname
+ )
+ host_id = hosts[0]['id']
+ return utils.make_json_response(
+ 200,
+ host_api.update_host_state_internal(
+ host_id, **data
+ )
+ )
+
+
+@app.route("/hosts/<int:host_id>/action", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def take_host_action(host_id):
+ """take host action.
+
+ Supported actions: [
+ 'poweron', 'poweroff', 'reset'
+ ]
+ """
+ data = _get_request_data()
+ poweron_func = _wrap_response(
+ functools.partial(
+ host_api.poweron_host, host_id, user=current_user,
+ ),
+ 202
+ )
+ poweroff_func = _wrap_response(
+ functools.partial(
+ host_api.poweroff_host, host_id, user=current_user,
+ ),
+ 202
+ )
+ reset_func = _wrap_response(
+ functools.partial(
+ host_api.reset_host, host_id, user=current_user,
+ )
+ )
+ return _group_data_action(
+ data,
+ poweron=poweron_func,
+ poweroff=poweroff_func,
+ reset=reset_func,
+ )
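
For illustration only, a minimal client-side sketch of driving this action dispatch with the requests library; the base URL, token value, header name, and host id below are assumptions, not values from this patch. The handler picks the poweron/poweroff/reset branch from whichever key appears in the request body:

```python
import requests

# Hypothetical deployment values -- adjust to your environment.
BASE_URL = 'http://compass-server/api'
TOKEN = '<token from POST /users/token>'

# The body carries one action key; _group_data_action routes it to the
# matching poweron/poweroff/reset handler defined above.
resp = requests.post(
    '%s/hosts/1/action' % BASE_URL,
    json={'poweron': True},
    headers={'X-Auth-Header': TOKEN},
)
print(resp.status_code, resp.json())
```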
+
+
+def _get_headers(*keys):
+ """Get proxied request headers."""
+ headers = {}
+ for key in keys:
+ if key in request.headers:
+ headers[key] = request.headers[key]
+ return headers
+
+
+def _get_response_json(response):
+ """Get proxies request json formatted response."""
+ try:
+ return response.json()
+ except ValueError:
+ return response.text
+
+
+@app.route("/proxy/<path:url>", methods=['GET'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_get(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding', app.config['AUTH_HEADER_NAME'],
+ 'Cookie'
+ )
+ response = requests.get(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ params=_get_request_args(),
+ headers=headers,
+ stream=True
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
+
+
+@app.route("/proxy/<path:url>", methods=['POST'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_post(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding',
+ 'Cookie'
+ )
+ response = requests.post(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ data=request.data,
+ headers=headers
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
+
+
+@app.route("/proxy/<path:url>", methods=['PUT'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_put(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding',
+ 'Cookie'
+ )
+ response = requests.put(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ data=request.data,
+ headers=headers
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
+
+
+@app.route("/proxy/<path:url>", methods=['PATCH'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_patch(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding',
+ 'Cookie'
+ )
+ response = requests.patch(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ data=request.data,
+ headers=headers
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
+
+
+@app.route("/proxy/<path:url>", methods=['DELETE'])
+@log_user_action
+@login_required
+@update_user_token
+def proxy_delete(url):
+ """proxy url."""
+ headers = _get_headers(
+ 'Content-Type', 'Accept-Encoding',
+ 'Content-Encoding', 'Accept', 'User-Agent',
+ 'Content-MD5', 'Transfer-Encoding',
+ 'Cookie'
+ )
+ response = requests.delete(
+ '%s/%s' % (setting.PROXY_URL_PREFIX, url),
+ headers=headers
+ )
+ logging.debug(
+ 'proxy %s response: %s',
+ url, response.text
+ )
+ return utils.make_json_response(
+ response.status_code, _get_response_json(response)
+ )
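
Taken together, the five proxy handlers replay the incoming verb against setting.PROXY_URL_PREFIX with a whitelisted subset of request headers. A hedged sketch of the client side of one round trip (the URL, path, and token are placeholders, not values from this patch):

```python
import requests

# A GET to /proxy/<path> is forwarded as GET <PROXY_URL_PREFIX>/<path>,
# with query args and the whitelisted headers passed through.
resp = requests.get(
    'http://compass-server/api/proxy/clusters',  # hypothetical URL
    params={'name': 'cluster1'},
    headers={'X-Auth-Header': '<token>'},
)
print(resp.status_code)
```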
+
+
+def init():
+ logging.info('init flask')
+ database.init()
+ adapter_api.load_adapters()
+ metadata_api.load_metadatas()
+ adapter_api.load_flavors()
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ init()
+ app.run(host='0.0.0.0')
diff --git a/compass-deck/api/api.raml b/compass-deck/api/api.raml
new file mode 100644
index 0000000..6855b57
--- /dev/null
+++ b/compass-deck/api/api.raml
@@ -0,0 +1,4027 @@
+#%RAML 0.8
+title: Compass
+version: v1
+baseUri: http://10.145.89.151/api
+mediaType: application/json
+
+
+/permissions:
+ get:
+ body:
+ application/json:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "alias": "list permissions",
+ "description": "list all permissions",
+ "id": 1,
+ "name": "list_permissions"
+ },
+ ]
+ description: List all permissions
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{permission_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "alias": "list permissions",
+ "description": "list all permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ message: "Cannot find the record in table Permission: {'id': '<permission_id>'}"
+ }
+ description: List a specific permission info
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/users:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "id": 1,
+ "email": "someuser@email.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true,
+ "created_at": "--timestamp---",
+ "last_login_at": "--timestamp---"
+ },
+ ]
+
+ description: Lists information for all users
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ queryParameters:
+ email:
+ is_admin:
+ active:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "email": "admin@someemail.com",
+ "password": "admin",
+ "firstname": "First",
+ "lastname": "Last"
+ }
+ responses:
+ 201:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "email": "user3@someemail.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true,
+ "created_at": "--timestamp---",
+ "last_login_at": "--timestamp---"
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+                {
+                  "message": "bad request"
+                }
+ 403:
+ body:
+ application/json:
+ example: |
+                {
+                  "message": "forbidden"
+                }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The user already exists!"
+ }
+ description: Creates a user(admin only)
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{user_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 1,
+ "email": "someuser@email.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true,
+ "created_at": "2014-03-25 12:00:00",
+ "last_login_at": "2014-03-25 12:05:00"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The user with id 'some--id--' cannot be found!"
+ }
+ description: Lists information for a specific user
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {"password": 123}
+ responses:
+ 201:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "email": "user3@someemail.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The user with id 'some--id--' cannot be found!"
+ }
+ description: Updates user’s information
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "email": "user3@someemail.com",
+ "first_name": "",
+ "last_name": "",
+ "is_admin": false,
+ "active": true
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The user cannot be found!"
+ }
+ description: Deletes a user(admin only)
+ headers:
+ X-Auth-Header:
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /permissions:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "user_id": 1,
+ "description": "list all permissions",
+ "permission_id": 1,
+ "updated_at": "2014-10-17 16:28:21",
+ "alias": "list permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "itemNotFound",
+ "message": "The user with id 'some--id--' cannot be found!"
+ }
+ description: Lists permissions for a specified user
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "add_permissions": [1,2,3],
+ "remove_permissions": [1],
+ "set_permissions": [1],
+ "disable_user": [1],
+ "enable_user": [1]
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ Add permission:
+
+ [
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "user_id": 1,
+ "description": "list all permissions",
+ "permission_id": 1,
+ "updated_at": "2014-10-17 16:28:21",
+ "alias": "list permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+
+ Remove permission:
+
+ [
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "user_id": 1,
+ "description": "list all permissions",
+ "permission_id": 1,
+ "updated_at": "2014-10-17 16:28:21",
+ "alias": "list permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+
+ Set Permission:
+
+ [
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "user_id": 1,
+ "description": "list all permissions",
+ "permission_id": 1,
+ "updated_at": "2014-10-17 16:28:21",
+ "alias": "list permissions",
+ "id": 1,
+ "name": "list_permissions"
+ }
+ ]
+
+ Enable user:
+
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "updated_at": "2014-10-17 16:28:21",
+ "email": "admin@huawei.com",
+ "is_admin": true,
+ "active": true,
+ "id": 1
+ }
+
+ Disable user:
+
+ {
+ "created_at": "2014-10-17 16:28:21",
+ "updated_at": "2014-10-17 16:28:21",
+ "email": "admin@huawei.com",
+ "is_admin": true,
+ "active": true,
+ "id": 1
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "itemNotFound",
+ "message": "The user cannot be found!"
+ }
+      description: Adds/removes permissions, enables/disables a user (admin only)
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /token:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "email": "admin@huawei.com",
+ "password": "admin"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "expire_timestamp": "2014-10-06 13:25:23",
+ "token": "$1$c1ZWGYEn$WTg57cnP4pEwd9JMJ7beS/",
+ "user_id": 1,
+ "id": 3
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "unauthorized",
+ "message": "Either email or password is wrong!"
+ }
+ description: Authenticates and generates a token
+ /login:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "email": "admin@huawei.com",
+ "password": "admin"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "expire_timestamp": "2014-10-06 13:25:23",
+ "token": "$1$c1ZWGYEn$WTg57cnP4pEwd9JMJ7beS/",
+ "user_id": 1,
+ "id": 3
+ }
+ 401:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "unauthorized",
+ "message": "Either email or password is wrong!"
+ }
+ 403:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "userDisabled",
+                "message": "User is disabled!"
+ }
+ description: Login
+ /logout:
+ post:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "expire_timestamp": "2014-10-17 18:30:29",
+ "token": "$1$AFqIS5Kn$1ASgOkPv.G1a7pkRRHKY.0",
+ "user_id": 1,
+ "id": 1
+ }
+ ]
+ 401:
+ body:
+ application/json:
+ example: |
+ {
+                "message": "invalid user token: $1$AFqIS5Kn$1ASgOkPv.G1a7pkRRHKY.0"
+ }
+ description: Logout
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
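
A minimal end-to-end sketch of the auth flow defined by /token, /login and /logout above, assuming the baseUri declared at the top of this spec; the credentials are the example values shown here, not real ones:

```python
import requests

BASE = 'http://10.145.89.151/api'  # baseUri from this spec

# 1. Authenticate; the response carries the token to replay on later calls.
token = requests.post(
    BASE + '/users/login',
    json={'email': 'admin@huawei.com', 'password': 'admin'},
).json()['token']

# 2. Send the token as X-Auth-Header on any protected endpoint.
users = requests.get(BASE + '/users', headers={'X-Auth-Header': token}).json()

# 3. Invalidate the token when finished.
requests.post(BASE + '/users/logout', headers={'X-Auth-Header': token})
```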
+/switches:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 2
+ }
+ ]
+ description: Lists switches
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "ip": "172.29.8.40",
+ "credentials":
+ {
+ "version": "2c",
+ "community": "public"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 2
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "IP address '192.168.1.1' already exists"
+ }
+ description: Creates a switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{switch_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the switch which id is '1'."
+ }
+ description: Lists a switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "ip": "172.29.8.40",
+ "credentials":
+ {
+ "version": "2c",
+ "community": "private"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "private"
+ },
+ "id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                  "message": "Cannot update the switch which id is '1'! The switch does not exist."
+ }
+ description: Set the switch properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "ip": "172.29.8.40",
+ "credentials":
+ {
+ "version": "3",
+ "community": "public"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2014-10-17 17:28:06",
+ "updated_at": "2014-10-17 17:28:06",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "3",
+ "community": "public"
+ },
+ "id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                  "message": "Cannot update the switch which id is '1'! The switch does not exist."
+ }
+ description: Updates the switch properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.41",
+ "created_at": "2014-10-17 17:45:17",
+ "updated_at": "2014-10-17 17:45:17",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 3
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table Switch: {'id': 4}"
+ }
+ description: Delete switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /machines:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ Get:
+ [
+ {
+ "vlans": [],
+ "updated_at": "2014-10-17 18:02:21",
+ "created_at": "2014-10-17 18:02:21",
+ "switch_id": 3,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.41",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "10",
+ "switch_machine_id": 204
+ }
+ ]
+ queryParameters:
+ port:
+ portStart:
+ portEnd:
+ portRange:
+        portPrefix:
+        portSuffix:
+ vlans:
+ mac:
+ tag:
+ location:
+ description: Lists machines for a specified switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "mac": "28:6e:d4:46:c4:25",
+ "port": "1",
+ "vlans": "88",
+ "ipmi_credentials": {
+ "ip": "1.2.3.4",
+ "username": "test",
+ "password": "test"
+ },
+ "tag": "tag",
+ "location": {
+ "column": "1",
+ "row": "1",
+ "unit": "1"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 1,
+ "mac": "28:6e:d4:47:c8:6c",
+ "vlan": 1,
+ "port": "10"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The switch does not exists."
+ }
+ description: Manually add a machine
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /machines:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "mac": "28:6e:d4:46:c4:25",
+ "port": "1",
+ "vlans": "88"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "duplicate_switches_machines": [
+ {
+ "mac": "a1:b2:c3:d4:e1:f6",
+ "port": "101"
+ }
+ ],
+ "switches_machines": [
+ {
+ "vlans": [],
+ "updated_at": "2015-05-07 10:55:12",
+ "created_at": "2015-05-07 10:55:12",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "70:7b:e8:e2:72:21",
+ "tag": {},
+ "location": {},
+ "switch_ip": "10.145.8.10",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "204",
+ "switch_machine_id": 1
+ },
+ {
+ "vlans": [],
+ "updated_at": "2015-05-07 10:55:12",
+ "created_at": "2015-05-07 10:55:12",
+ "switch_id": 2,
+ "id": 2,
+ "mac": "a1:b2:c3:d4:e1:f6",
+ "tag": {},
+ "location": {},
+ "switch_ip": "10.145.8.10",
+ "ipmi_credentials": {},
+ "machine_id": 2,
+ "port": "101",
+ "switch_machine_id": 2
+ },
+ {
+ "vlans": [],
+ "updated_at": "2015-05-07 10:55:12",
+ "created_at": "2015-05-07 10:55:12",
+ "switch_id": 3,
+ "id": 3,
+ "mac": "a1:b2:c3:d4:e5:f9",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 3,
+ "port": "121",
+ "switch_machine_id": 3
+ }
+ ],
+ "fail_switches_machines": [
+ {
+ "mac": "a1:b5:c3:d4:e5:f9",
+ "port": "131"
+ },
+ {
+ "mac": "a1:b2:c3:d4:e1:f6",
+ "port": "13"
+ }
+ ]
+ }
+      description: Batch-add switch machines. If a machine is connected to another switch or its switch does not exist, it is added to fail_switches_machines and returned; if the machine already exists, it is added to duplicate_switches_machines.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+
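A hedged sketch of driving the batch endpoint above with the requests library. The spec declares /machines twice under /switches, so the exact batch route used here is an assumption; the payload shape and the duplicate/fail result lists follow the example response:

```python
import requests

BASE = 'http://10.145.89.151/api'  # baseUri from this spec
HEADERS = {'X-Auth-Header': '<token>'}

# Post several machine records at once; conflicting entries come back in
# duplicate_switches_machines / fail_switches_machines instead of failing
# the whole request. Path and MACs are illustrative.
machines = [
    {'mac': '70:7b:e8:e2:72:21', 'port': '204'},
    {'mac': 'a1:b2:c3:d4:e1:f6', 'port': '101'},
]
result = requests.post(
    BASE + '/switches/machines', json=machines, headers=HEADERS
).json()
print(result.get('fail_switches_machines'))
```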
+ /{id}/machines/{machine_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "vlans": [
+ 88
+ ],
+ "updated_at": "2014-10-17 17:40:13",
+ "created_at": "2014-10-17 17:40:13",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "7",
+ "switch_machine_id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table SwitchMachine: {'machine_id': 1000, 'switch_id': 2}"
+ }
+ description: Get machine of a specified switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "port": "80",
+ "vlans": "88",
+              "ipmi_credentials": "ipmi_credentials here",
+ "tag": "tag here",
+ "location":
+ {"building": "E5"}
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "vlans": [
+ 88
+ ],
+ "updated_at": "2014-10-17 17:40:13",
+ "created_at": "2014-10-17 17:40:13",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5"
+ },
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "7",
+ "switch_machine_id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table SwitchMachine: {'machine_id': 1000, 'switch_id': 2}"
+ }
+ description: set machine property of a specified switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "port": "80",
+ "vlans": "88",
+              "ipmi_credentials": "ipmi_credentials here",
+ "tag": "tag here",
+ "location":
+ {"city": "Beijing"}
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "vlans": [
+ 88
+ ],
+ "updated_at": "2014-10-17 17:40:13",
+ "created_at": "2014-10-17 17:40:13",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "7",
+ "switch_machine_id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table SwitchMachine: {'machine_id': 1000, 'switch_id': 2}"
+ }
+ description: update machine property of a specified switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "vlans": [
+ 88
+ ],
+ "updated_at": "2014-10-17 17:40:13",
+ "created_at": "2014-10-17 17:40:13",
+ "switch_id": 2,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "7",
+ "switch_machine_id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cannot find the record in table SwitchMachine: {'machine_id': 1000, 'switch_id': 2}"
+ }
+ description: Delete a machine from a switch
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{switch_id}/action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "find_machines": 1,
+            "add_machines": [{"machine_id":1,"port":"10"}],
+            "remove_machines": 1,
+ "set_machines": [{"machine_id": 1, "port": "10"}]
+ }
+ responses:
+ 202:
+ body:
+ application/json:
+ example: |
+ find_machines:
+ {
+ "status": "action {'find_machines': None} sent",
+ "details": {}
+ }
+ 200:
+ body:
+ application/json:
+ example: |
+ add_machines:
+ [
+ {
+ "vlans": [],
+ "updated_at": "2014-10-17 17:56:44",
+ "created_at": "2014-10-17 17:56:44",
+ "switch_id": 3,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.41",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "10",
+ "switch_machine_id": 203
+ }
+ ]
+
+ remove_machines:
+ []
+ set_machines:
+ [
+ {
+ "vlans": [],
+ "updated_at": "2014-10-17 17:56:44",
+ "created_at": "2014-10-17 17:56:44",
+ "switch_id": 3,
+ "id": 1,
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {},
+ "switch_ip": "172.29.8.41",
+ "ipmi_credentials": {},
+ "machine_id": 1,
+ "port": "10",
+ "switch_machine_id": 203
+ }
+ ]
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                "message": "Cannot update the switch which id is '1'! The switch does not exist."
+ }
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/switchbatch:
+ post:
+ body:
+ application/json:
+ schema: |
+        [{
+          "switch_ip": "127.0.0.1",
+          "credentials": {
+            "version": "2c",
+            "community": "public"
+          }
+        },{
+          "switch_ip": "127.0.0.2"
+        }]
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "switches": [
+ {
+ "vendor": "Huawei",
+ "ip": "10.145.8.10",
+ "created_at": "2015-05-04 16:13:34",
+ "updated_at": "2015-05-04 16:13:34",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "public"
+ },
+ "id": 2
+ },
+ {
+ "ip": "172.29.8.40",
+ "created_at": "2015-05-04 16:13:34",
+ "updated_at": "2015-05-04 16:13:34",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {},
+ "id": 3
+ }
+ ],
+ "fail_switches": [
+ {
+ "ip": "172.29.8.40"
+ }
+ ]
+ }
+    description: Batch-add switches. If a switch IP already exists, its data is added to the fail_switches list and returned.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
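
For illustration, the same client pattern against /switchbatch; the IPs mirror the schema example and the token is a placeholder:

```python
import requests

BASE = 'http://10.145.89.151/api'  # baseUri from this spec

# Register two switches in one call; an already-known IP is reported in
# fail_switches rather than aborting the whole batch.
payload = [
    {'switch_ip': '127.0.0.1',
     'credentials': {'version': '2c', 'community': 'public'}},
    {'switch_ip': '127.0.0.2'},
]
result = requests.post(
    BASE + '/switchbatch', json=payload,
    headers={'X-Auth-Header': '<token>'},
).json()
print(result.get('fail_switches'))
```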
+/machines:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-17 23:22:53",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ },
+ ]
+ queryParameters:
+ mac:
+ tag:
+ location:
+ description: Lists machines
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{machine_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-17 23:22:53",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {},
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                "message": "The machine with ID '$machine_id' cannot be found!"
+ }
+      description: Lists information for a specific machine
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "ipmi_credentials": {
+ "builder": "huawei"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-17 23:58:46",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {
+ "builder": "huawei"
+ },
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                  "message": "The machine with ID '$machine_id' cannot be found!"
+ }
+ description: set machine properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "ipmi_credentials": {
+ "builder": "huawei"
+ },
+ "tag": {
+ "type": "ES200"
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-18 00:03:12",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {
+ "type": "ES200"
+ },
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                  "message": "The machine with ID '$machine_id' cannot be found!"
+ }
+      description: update machine properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-18 00:03:12",
+ "switches": [],
+ "mac": "28:6e:d4:46:c4:25",
+ "tag": {
+ "type": "ES200"
+ },
+ "location": {
+ "building": "E5",
+ "city": "beijing"
+ },
+ "ipmi_credentials": {},
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                "message": "The machine with ID '$machine_id' cannot be found!"
+ }
+ description: Delete a machine (admin only)
+ /action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "tag": {"builder": "huawei"},
+ "poweron": "true",
+ "poweroff": "true",
+ "reset": "true"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ tag example:
+
+ {
+ "created_at": "2014-10-17 17:40:13",
+ "updated_at": "2014-10-18 00:10:58",
+ "id": 2,
+ "switches": [
+ {
+ "switch_ip": "172.29.8.40",
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+ ],
+ "mac": "00:0c:29:2b:c9:d4",
+ "tag": {
+ "builder": "huawei"
+ },
+ "location": {},
+ "switch_ip": "172.29.8.40",
+ "ipmi_credentials": {},
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+
+                  poweron / poweroff / reset example:
+
+                  {
+                      "status": "poweron 00:0c:29:2b:c9:d4 action sent"
+                  }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                  "message": "The machine with ID '$machine_id' cannot be found!"
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+ {
+                  "message": "The machine has no IPMI info set!"
+ }
+ description: machine actions
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
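
A short sketch of the machine action endpoint above (machine id and token are placeholders); as with host actions, the handler dispatches on whichever action key the body carries, and power actions require IPMI info per the 400 response:

```python
import requests

BASE = 'http://10.145.89.151/api'  # baseUri from this spec

# Re-tag machine 1; swapping the body for {'poweron': 'true'} would
# instead trigger the IPMI power-on path.
resp = requests.post(
    BASE + '/machines/1/action',
    json={'tag': {'builder': 'huawei'}},
    headers={'X-Auth-Header': '<token>'},
)
print(resp.status_code, resp.json())
```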
+/flavors:
+ /{flavor_id}/metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "flavor_config": {
+ "neutron_config": {...},
+ "security": {...},
+ "ha_proxy": {...},
+ "network_mapping": {...}
+
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {message: "flavor <flavor_id> does not exist"}
+ description: List specific flavor metadata.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{flavor_id}/ui_metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+              {
+                "flavor_config": [
+                  {
+                   "category": "service_credentials",
+                   "modifiable_data": [
+                     "username",
+                     "password"
+                   ],
+                   "table_display_header": [
+                     "Service",
+                     "UserName",
+                     "Password",
+                     "Action"
+                   ],
+                   "accordion_heading": "OpenStack Database and Queue Credentials",
+                   "action": true,
+                   "data_structure": "table"
+                  },
+                  {...},
+                  {...}
+                ]
+              }
+ 404:
+ body:
+ application/json:
+ example: |
+ {message: "flavor <flavor_id> does not exist"}
+ description: List specific flavor ui metadata.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/adapters:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [{
+ "flavors": [
+ {
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "display_name": "All-In-One",
+ "id": 4,
+ "template": "allinone.tmpl",
+ "name": "allinone"
+ },
+ ],
+ "package_installer": {
+ "id": 1,
+ "alias": "chef_installer",
+ "name": "chef_installer",
+ "settings": {
+ "chef_server_ip": "10.145.88.211",
+ "client_name": "",
+ "chef_server_dns": "compass",
+ "databags": [],
+ "chef_url": "https://10.145.88.211",
+ "key_dir": ""
+ }
+ },
+ "name": "openstack_icehouse",
+ "os_installer": {
+ "id": 1,
+ "alias": "cobbler",
+ "name": "cobbler",
+ "settings": {
+ "credentials": {
+ "username": "cobbler",
+ "password": "cobbler"
+ },
+ "cobbler_url": "http://10.145.88.211/cobbler_api"
+ }
+ },
+ "supported_oses": [
+ {
+ "os_id": 1,
+ "id": 1,
+ "name": "Ubuntu-12.04-x86_64"
+ },
+ {
+ "os_id": 2,
+ "id": 2,
+ "name": "CentOS-6.5-x86_64"
+ }
+ ],
+ "display_name": "OpenStack Icehouse",
+ "id": 3
+ }]
+ queryParameters:
+ name:
+ description: Lists information for all adapters
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "openstack",
+ "display": "OpenStack",
+ "os_installer": "cobbler",
+ "package_installer": "chef",
+ "roles": [ { "display_name": "compute",
+ "name": "os-compute-worker"
+ },
+ { "display_name": "controller",
+ "name": "os-controller"
+ },
+ { "display_name": "network",
+ "name": "os-network"
+ },
+                      { "display_name": "storage",
+                        "name": "os-block-storage-worker"
+                      }
+                    ],
+ "compatible_os": [
+ {
+ "name": "CentOs",
+ "os_id": 1
+ },
+ {
+ "name": "Ubuntu",
+ "os_id": 2
+ }
+ ]
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The adapter with id 'some_id' cannot be found!"
+ }
+ description: Lists information for a specified adapter
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /oses/{os_id}/metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config": {
+ "security": {
+ "_self": {
+ "mapping_to": "",
+ "description": null,
+ "required_in_whole_config": true,
+ "display_type": null,
+ "js_validator": null,
+ "default_value": null,
+ "field_type": "dict",
+ "name": "security",
+ "required_in_options": false,
+ "is_required": false,
+ "options": null
+                          }
+                        }
+                      },
+                      "os_config": {
+ "server_credentials": {
+ "_self": {
+ "mapping_to": "server_credentials",
+ "description": null,
+ "required_in_whole_config": true,
+ "display_type": null,
+ "js_validator": null,
+ "default_value": null,
+ "field_type": "dict",
+ "name": "server_credentials",
+ "required_in_options": false,
+ "is_required": false,
+ "options": null
+ },
+ "username": {
+ "_self": {
+ "mapping_to": "username",
+ "description": "username",
+ "required_in_whole_config": false,
+ "display_type": "text",
+ "js_validator": null,
+ "default_value": "root",
+ "field_type": "basestring",
+ "name": "username",
+ "required_in_options": false,
+ "is_required": true,
+ "options": null
+ }
+ },
+ },
+ },
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "The adapter with id 'some_id' cannot be found!"
+ }
+ description: Lists config formats for a specified adapter and os
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /oses/{os_id}/ui_metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_global_config": [
+ {
+ "title": "Server Credentials",
+ "data": [
+ {
+ "default_value": "root",
+ "display_name": "User name",
+ "name": "username",
+ "display_type": "text",
+ "is_required": "true",
+ "placeholder": "Username",
+ "order": 1
+ },
+ {
+ "display_name": "Confirm Password",
+ "name": "confirmPassword",
+ "datamatch": "password",
+ "display_type": "password",
+ "is_required": "true",
+ "placeholder": "Confirm Password",
+ "order": 3
+ },
+ {
+ "display_name": "Password",
+ "name": "password",
+ "display_type": "password",
+ "is_required": "true",
+ "placeholder": "Password",
+ "order": 2
+ }],
+ "order": 2,
+ "name": "server_credentials"
+ },
+                ]
+              }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "os <os_id> does not exist"
+ }
+ description: List specified os ui metadata.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+
+/subnets:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [{
+ "updated_at": "2014-10-18 21:24:46",
+ "subnet": "10.145.88.0/23",
+ "created_at": "2014-10-18 21:24:46",
+ "id": 1,
+ "name": "10.145.88.0/23"
+ }]
+    description: Gets information for all subnetworks
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "subnet": "10.172.20.0/24",
+ "name": "test_subnet"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "updated_at": "2014-10-18 21:24:46",
+ "subnet": "10.145.88.0/23",
+ "created_at": "2014-10-18 21:24:46",
+ "id": 1,
+ "name": "10.145.88.0/23"
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Keyword '$somekey' cannot be recognized!"
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnet already exists!"
+ }
+ description: Creates one subnetwork
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{subnet_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "updated_at": "2014-10-18 21:24:46",
+ "subnet": "10.145.88.0/23",
+ "created_at": "2014-10-18 21:24:46",
+ "id": 1,
+ "name": "10.145.88.0/23"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnetwork with id 'some_id' cannot be found!"
+ }
+ description: Gets one subnetwork info
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "subnet": "10.172.20.0/24",
+ "name": "update_subnet"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "updated_at": "2014-10-18 21:44:17",
+ "subnet": "10.145.86.0/23",
+ "created_at": "2014-10-18 21:43:50",
+ "id": 1,
+ "name": "10.145.86.0/23"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnetwork with id 'some_id' cannot be found!"
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnet name already exists!"
+ }
+ description: set subnet properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 403:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnetwork is in use by some interface. Cannot delete it."
+ }
+
+
+ {
+ "message": "Subnetwork can only be deleted by creator or admin!"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Subnetwork with id 'some_id' cannot be found!"
+ }
+ description: Deletes a subnetwork (owner, admin only)
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/clusters:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "created_at": "2014-10-18 23:01:23",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster1",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:01:23",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 1
+ }
+ ]
+ queryParameters:
+ name:
+ os_name:
+ owner:
+ adapter_name:
+ flavor_name:
+ description: Lists all information for all clusters
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "adapter_id": 3,
+ "name": "add_cluster",
+ "os_id": 1,
+ "flavor_id": 1
+ }
+ responses:
+ 201:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-18 23:01:23",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster1",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:01:23",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 1
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with name 'cluster_01' already exists!"
+ }
+ description: Creates a new cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
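
A brief sketch of creating a cluster with the schema above; the adapter/os/flavor ids are the example values and would come from GET /adapters in a real session, while the token is a placeholder:

```python
import requests

BASE = 'http://10.145.89.151/api'  # baseUri from this spec

resp = requests.post(
    BASE + '/clusters',
    json={'adapter_id': 3, 'name': 'add_cluster',
          'os_id': 1, 'flavor_id': 1},
    headers={'X-Auth-Header': '<token>'},
)
# 201 carries the new cluster record, 409 a duplicate-name message.
print(resp.status_code, resp.json().get('id'))
```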
+ /{cluster_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-18 23:01:23",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster1",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:01:23",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Lists information for a specified cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "name": "update_cluster"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-18 23:16:02",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster_new",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:16:39",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 2
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster <cluster_id> not found"
+ }
+ description: set properties of cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "created_at": "2014-10-18 23:01:23",
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster1",
+ "reinstall_distributed_system": true,
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:01:23",
+ "owner": "admin@huawei.com",
+ "os_id": 2,
+ "distributed_system_installed": false,
+ "flavor": {
+ "display_name": "All-In-One",
+ "name": "allinone",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "role_id": 35,
+ "flavor_id": 4,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "adapter_id": 3,
+ "template": "allinone.tmpl",
+ "id": 4
+ },
+ "id": 1
+ }
+ 403:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster has been deployed or is being installed. Not allowed to delete it now!"
+ }
+      description: Deletes a specific cluster before deployment (admin, owner only). Hosts are kept even after the cluster is deleted.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /config:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config": {
+ },
+ "os_config": {
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Gets config information for a specified cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config":{
+ "general": {
+ "language": "EN",
+ "timezone": "PDT",
+ "domain": "xxx",
+ "default_gateway": "10.0.0.1"
+ },
+ "server_credentials": {
+ "username": "admin",
+ "password": "admin"
+ },
+ "partition": {
+ "/var" : {
+ "_type": "$path",
+ "max_size": "20",
+ "size_percentage": "20"
+ }
+ }
+ },
+ "package_config":{
+ "network_mapping": {
+ "management": {
+ "interface": "eth0"
+ },
+ "tenant": {
+ "interface": "eth1"
+ },
+ "storage": {
+ "interface":" eth2"
+ },
+ "public": {
+ "interface": "eth3"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+                    "os_config": {
+                      "general": {
+                        "language": "EN",
+                        "timezone": "PDT",
+                        "domain": "xxx",
+                        "default_gateway": "10.0.0.1"
+                      },
+                      "server_credentials": {
+                        "username": "admin",
+                        "password": "admin"
+                      },
+                      "partition": {
+                        "/var" : {
+                          "max_size": "20",
+                          "size_percentage": "20"
+                        }
+                      }
+                    }
+                  }
+
+ {
+ "package_config": {
+ "network_mapping": {
+ "management": {
+ "interface": "eth0"
+ },
+ "tenant": {
+ "interface": "eth1"
+ },
+ "storage": {
+ "interface":"eth2"
+ },
+ "public": {
+ "interface": "eth3"
+ }
+ }
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: set properties in cluster config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "package_config": {
+ "security": {
+ "dashboard_credentials": {
+ "username": "root"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config":{
+ "security": {
+                      "service_credentials": {
+ "image": {
+ "username": "admin",
+ "password": "admin"
+ },
+ ...
+ },
+ "dashboard_credentials":{
+ "username": "root",
+ "password": "admin"
+ }
+ }
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: update properties in cluster config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config":{
+ "security": {
+                    "service_credentials": {
+ "image": {
+ "username": "admin",
+ "password": "admin"
+ },
+ ...
+ }
+ }
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: delete cluster config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /state:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config": {
+ },
+ "os_config": {
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: get cluster state
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /hosts:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "id" : 1,
+ "name": "host_01",
+ "dns": "xxx",
+ "os": "Centos",
+ "mac": "---MAC-address---",
+ "machine_id": 1,
+ "os_installed": true,
+ },
+                ...
+ ]
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Gets the information of the hosts belonging to this cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "machine_id": 1,
+ "name": "cluster_host",
+ "reinstall_os": "True",
+ "roles": ["allinone-compute"]
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_installer": {
+ "id": 1,
+ "alias": "cobbler",
+ "name": "cobbler",
+ "settings": {
+ "credentials": {
+ "username": "cobbler",
+ "password": "cobbler"
+ },
+ "cobbler_url": "http://10.145.88.211/cobbler_api"
+ }
+ },
+ "ip": null,
+ "clusterhost_id": 2,
+ "updated_at": "2014-10-18 23:47:47",
+ "switches": [
+ {
+ "switch_ip": "172.29.8.40",
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+ ],
+ "os_installed": false,
+ "tag": {},
+ "cluster_id": 2,
+ "id": 2,
+ "switch_ip": "172.29.8.40",
+ "networks": {
+ },
+ "hostname": null,
+ "reinstall_os": true,
+ "owner": "admin@huawei.com",
+ "port": "4",
+ "location": {},
+ "os_name": "CentOS-6.5-x86_64",
+ "reinstall_distributed_system": true,
+ "mac": "00:0c:29:2b:c9:d4",
+ "host_id": 2,
+ "distributed_system_installed": false,
+ "name": "None.cluster_new",
+ "roles": [],
+ "clustername": "cluster_new",
+ "created_at": "2014-10-18 23:47:47",
+ "machine_id": 2
+ }
+ 409:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "host <host_id> already exists"
+ }
+ description: add host to a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
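+      # A minimal sketch (using the apiclient shipped later in this change) of
+      # adding a discovered machine to a cluster from Python; the cluster id,
+      # machine id and role are illustrative values, not fixed API constants:
+      #
+      #   from restful import Client
+      #   client = Client('http://localhost/api')
+      #   client.login('admin@huawei.com', 'admin')
+      #   status, resp = client.add_hosts_to_cluster(
+      #       1, {'machines': [{'machine_id': 1, 'reinstall_os': 'True'}]})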
+ /{host_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "host_01",
+ "dns": "xxx",
+ "os": "Centos",
+ "mac": "---MAC-address---",
+ "machine_id": 1,
+ "os_installed": true,
+ "links": [
+ {
+ "href" : "/hosts/1",
+ "rel": "self"
+ },
+ {
+ "href": "/clusters/1/hosts/1/config",
+ "rel": "host package config"
+ }
+ ]
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: get host of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "name": "update_cluster_host",
+ "reinstall_os": "False",
+ "roles": ["ha-proxy"]
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_installer": {
+ "id": 1,
+ "alias": "cobbler",
+ "name": "cobbler",
+ "settings": {
+ "credentials": {
+ "username": "cobbler",
+ "password": "cobbler"
+ },
+ "cobbler_url": "http://10.145.88.211/cobbler_api"
+ }
+ },
+ "ip": null,
+ "clusterhost_id": 2,
+ "updated_at": "2014-10-19 00:10:43",
+ "switches": [
+ {
+ "switch_ip": "172.29.8.40",
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+ ],
+ "os_installed": false,
+ "tag": {},
+ "cluster_id": 2,
+ "id": 2,
+ "switch_ip": "172.29.8.40",
+ "networks": {},
+ "hostname": null,
+ "reinstall_os": true,
+ "owner": "admin@huawei.com",
+ "port": "4",
+ "location": {},
+ "os_name": "CentOS-6.5-x86_64",
+ "reinstall_distributed_system": true,
+ "mac": "00:0c:29:2b:c9:d4",
+ "host_id": 2,
+ "distributed_system_installed": false,
+ "name": "None.cluster_new",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ }
+ ],
+ "clustername": "cluster_new",
+ "created_at": "2014-10-18 23:47:47",
+ "machine_id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host properties of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "roles": "os-controller"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_installer": {
+ "id": 1,
+ "alias": "cobbler",
+ "name": "cobbler",
+ "settings": {
+ "credentials": {
+ "username": "cobbler",
+ "password": "cobbler"
+ },
+ "cobbler_url": "http://10.145.88.211/cobbler_api"
+ }
+ },
+ "ip": null,
+ "clusterhost_id": 2,
+ "updated_at": "2014-10-19 00:10:43",
+ "switches": [
+ {
+ "switch_ip": "172.29.8.40",
+ "vlans": [
+ 88
+ ],
+ "port": "4"
+ }
+ ],
+ "os_installed": false,
+ "tag": {},
+ "cluster_id": 2,
+ "id": 2,
+ "switch_ip": "172.29.8.40",
+ "networks": {},
+ "hostname": null,
+ "reinstall_os": true,
+ "owner": "admin@huawei.com",
+ "port": "4",
+ "location": {},
+ "os_name": "CentOS-6.5-x86_64",
+ "reinstall_distributed_system": true,
+ "mac": "00:0c:29:2b:c9:d4",
+ "host_id": 2,
+ "distributed_system_installed": false,
+ "name": "None.cluster_new",
+ "roles": [
+ {
+ "display_name": "all in one compute",
+ "description": "all in one compute",
+ "adapter_id": 3,
+ "optional": true,
+ "id": 35,
+ "name": "allinone-compute"
+ },
+ {
+ "name": "new-role",
+ ...
+ }
+ ],
+ "clustername": "cluster_new",
+ "created_at": "2014-10-18 23:47:47",
+ "machine_id": 2
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: update host properties of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ description: delete host from a cluster
+ /config:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+ ...
+ },
+ "package_config": {
+ ...
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: get config of a host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config": {
+ "general": {
+ "language": "EN",
+ "timezone": "UTC",
+ "http_proxy": "http://127.0.0.1:3128",
+ "https_proxy": "http://127.0.0.1:3128",
+ "no_proxy": [
+ "127.0.0.1",
+ "compass"
+ ],
+ "ntp_server": "127.0.0.1",
+ "dns_servers": [
+ "127.0.0.1"
+ ],
+ "domain": "ods.com",
+ "search_path": [
+ "ods.com"
+ ],
+ "default_gateway": "127.0.0.1"
+ },
+ "server_credentials": {
+ "username": "root",
+ "password": "root"
+ },
+ "partition": {
+ "/var": {
+ "max_size": "100G",
+ "percentage": 10,
+ "size": "1G"
+ }
+ }
+ },
+ "package_config": {
+ "network_mapping": {
+ "management": {
+ "interface": "eth0"
+ },
+ "tenant": {
+ "interface": "eth1"
+ },
+ "storage": {
+ "interface":"eth2"
+ },
+ "public": {
+ "interface": "eth3"
+ }
+ },
+ "services_credentials": {
+ "image": {
+ "username": "xxx",
+ "password": "xxx"
+ },
+ "metering": {
+ "username": "xxx",
+ "password": "xxx"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+                  ...
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config": {
+ "general": {
+ "language": "EN",
+ "timezone": "UTC",
+ "http_proxy": "http://127.0.0.1:3128",
+ "https_proxy": "http://127.0.0.1:3128",
+ "no_proxy": [
+ "127.0.0.1",
+ "compass"
+ ],
+ "ntp_server": "127.0.0.1",
+ "dns_servers": [
+ "127.0.0.1"
+ ],
+ "domain": "ods.com",
+ "search_path": [
+ "ods.com"
+ ],
+ "default_gateway": "127.0.0.1"
+ },
+ "server_credentials": {
+ "username": "root",
+ "password": "root"
+ },
+ "partition": {
+ "/var": {
+ "max_size": "100G",
+ "percentage": 10,
+ "size": "1G"
+ }
+ }
+ },
+ "package_config": {
+ "network_mapping": {
+ "management": {
+ "interface": "eth0"
+ },
+ "tenant": {
+ "interface": "eth1"
+ },
+ "storage": {
+ "interface":"eth2"
+ },
+ "public": {
+ "interface": "eth3"
+ }
+ },
+ "services_credentials": {
+ "image": {
+ "username": "xxx",
+ "password": "xxx"
+ },
+ "metering": {
+ "username": "xxx",
+ "password": "xxx"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+                    ... // the same as the PATCH cluster config response
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: update host config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+                    ... // the same as the PATCH cluster config response
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: delete host config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /state:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "cluster_id" : 1,
+                "host_id": 10,
+ "state": "INSTALLING",
+ "percentage": 0.5,
+ "severity": "INFO",
+ "message": "-----some--message-----",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: get host state of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "state": "INSTALLING"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "cluster_id" : 1,
+                "host_id": 10,
+ "state": "SUCCESSFUL",
+ "percentage": 1,
+ "severity": "INFO",
+ "message": "-----some--message-----",
+ "updated_at": "---timestamp---"
+ }
+ OR
+ {
+ "cluster_id" : 1,
+                "host_id": 10,
+ "state": "ERROR",
+ "percentage": 0.7,
+ "severity": "ERROR",
+ "message": "---some-error-message---",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host state properties of a cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
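+    # A hedged sketch of how an installer callback could report progress
+    # through the state endpoint above using the plain requests library; the
+    # URL and ids are illustrative, and the X-Auth-Token header name follows
+    # the apiclient in this change (token as returned by /users/token):
+    #
+    #   import json
+    #   import requests
+    #   requests.put('http://localhost/api/clusters/1/hosts/10/state',
+    #                data=json.dumps({'state': 'INSTALLING'}),
+    #                headers={'X-Auth-Token': token})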
+ /action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "add_hosts": {
+ "machines": [{
+ "machine_id": 1,
+ "host_id": 1,
+ "reinstall_os": "True"
+ },{
+ "machine_id": 2,
+ "host_id": 2
+ }]
+ },
+ "set_hosts": {
+ "machines": [{
+ "machine_id": 3
+ },{
+ "machine_id": 4
+ }]
+ },
+ "remove_hosts": {
+ "hosts": [1]
+ },
+ "review": {
+ "hosts": [1,2,3]
+ },
+ "deploy": {
+ "hosts": [1,2,3]
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "hosts": [
+ {
+ "id" : 5,
+ "machine_id": 10
+ },
+ {
+ "id" : 6,
+ "machine_id": 11
+ },
+ {
+ "id" : 7,
+ "machine_id": 12
+ }
+ ]
+ }
+
+ OR
+
+ {
+ "hosts": [
+ {
+ "id" : 1,
+ "machine_id": 13
+ },
+ {
+ "id" : 2,
+ "machine_id": 14
+ },
+ {
+ "id" : 3,
+ "machine_id": 15
+ }
+ ]
+ }
+
+ OR
+
+ {
+ "hosts": [
+ {
+ "id" : 1,
+ "machine_id": 13
+ }
+ ]
+ }
+
+ OR
+ {
+ "hosts": [
+ {
+ "id" : 1,
+ "machine_id": 10
+ },
+ {
+ "id" : 2,
+ "machine_id": 11
+ },
+ {
+ "id" : 3,
+ "machine_id": 12
+ }
+ ]
+ }
+
+ OR
+
+ {
+ "cluster": {"id": 1},
+ "hosts": [{"id": 1}, {"id": 2}, {"id": 3}]
+ }
+
+ OR
+
+ {
+ "status": "deploy action sent",
+ "cluster": {
+ "id": 1,
+ },
+ "hosts": [
+ {
+ "id": 3
+ }
+ ]
+ }
+
+
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Takes an action for a specific cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
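+    # The review and deploy actions above are exactly what the bundled
+    # apiclient example drives; a minimal sketch with illustrative ids:
+    #
+    #   status, resp = client.review_cluster(1, review={'hosts': [1, 2, 3]})
+    #   status, resp = client.deploy_cluster(1, deploy={'hosts': [1, 2, 3]})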
+ /metadata:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "package_config": {
+ },
+ "os_config": {
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": "Cluster with id 'some_id' cannot be found!"
+ }
+ description: Get metadata of a specific cluster
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+
+/hosts:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+                "id": 1,
+ "name": "host_01",
+ "machine_id": 1,
+ "mac": "---MAC-address--",
+ "ip": "192.168.1.2",
+ "os": "CentOS",
+ "os_installed": false,
+ "clusters": ["cluster_01"],
+ "created_by": "user1@email.com",
+ "created_at": "---timestamp---",
+ "updated_at": "---timestamp---",
+                "links": [
+ {
+ "href" : "/hosts/1",
+                    "rel": "self"
+ }
+ ]
+ },
+ ...
+ ]
+ queryParameters:
+ name:
+ os_name:
+ owner:
+ mac:
+ description: Lists information for all hosts
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{host_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "host_01",
+ "machine_id": 1,
+                "mac": "---MAC-address--",
+                "ip": "192.168.1.2",
+                "os": "CentOS",
+ "os_installed": false,
+ "domain": "xxx",
+ "dns": "xxx",
+ "created_by": "user1@email.com",
+ "created_at": "---timestamp---",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Lists information for a specified host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "name": "update_host_name"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "host1"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host properties.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id" : 1,
+ "name": "host_01_new",
+ "mac": "---MAC-address--",
+                "os_name": "CentOS",
+ "os_installed": false
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "type": "itemNotFound",
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+      description: Deletes a host (admin only). The host must not be in any cluster.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /action:
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "poweron": [1],
+ "poweroff": [1],
+ "reset": [1]
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "status": "host <host_id> power<on|off|reset> action sent",
+ "host": {...}
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                  "message": "The host with ID '$host_id' cannot be found!"
+ }
+ 400:
+ body:
+ application/json:
+ example: |
+ {
+                  "message": "The host did not set IPMI info!"
+ }
+      description: Powers on, powers off, or resets this host via IPMI
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
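+    # A hedged sketch of sending a power action with the requests library;
+    # the payload mirrors the schema above, and the URL, host id and token
+    # are illustrative:
+    #
+    #   import json
+    #   import requests
+    #   requests.post('http://localhost/api/hosts/1/action',
+    #                 data=json.dumps({'poweron': [1]}),
+    #                 headers={'X-Auth-Token': token})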
+ /clusters:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "os_name": "CentOS-6.5-x86_64",
+ "name": "cluster_new",
+ "reinstall_distributed_system": true,
+ "created_at": "2014-10-18 23:16:02",
+ "adapter_id": 3,
+ "updated_at": "2014-10-18 23:16:39",
+ "owner": "admin@huawei.com",
+ "distributed_system_installed": false,
+ "id": 2
+ }
+ ]
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Lists clusters which the host belongs to
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /config:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+ "global": {
+ "language": "EN",
+                      "timezone": "PDT"
+                    },
+ "partition": {
+ "/var": {
+ "max_size": "20",
+ "size_percentage": "30"
+ },
+ "/home": {
+ "max_size": "20",
+ "size_percentage": "40"
+ }
+ }
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Lists config information for a specified host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config": {
+ "general": {
+ "language": "EN",
+ "timezone": "UTC",
+ "http_proxy": "http://127.0.0.1:3128",
+ "https_proxy": "http://127.0.0.1:3128",
+ "no_proxy": [
+ "127.0.0.1",
+ "compass"
+ ],
+ "ntp_server": "127.0.0.1",
+ "dns_servers": [
+ "127.0.0.1"
+ ],
+ "domain": "ods.com",
+ "search_path": [
+ "ods.com"
+ ],
+ "default_gateway": "127.0.0.1"
+ },
+ "server_credentials": {
+ "username": "root",
+ "password": "root"
+ },
+ "partition": {
+ "/var": {
+ "max_size": "100G",
+ "percentage": 10,
+ "size": "1G"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+                    ...
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set config properties for a specified host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "os_config": {
+ "general": {
+ "language": "EN",
+ "timezone": "UTC",
+ "http_proxy": "http://127.0.0.1:3128",
+ "https_proxy": "http://127.0.0.1:3128",
+ "no_proxy": [
+ "127.0.0.1",
+ "compass"
+ ],
+ "ntp_server": "127.0.0.1",
+ "dns_servers": [
+ "127.0.0.1"
+ ],
+ "domain": "ods.com",
+ "search_path": [
+ "ods.com"
+ ],
+ "default_gateway": "127.0.0.1"
+ },
+ "server_credentials": {
+ "username": "root",
+ "password": "root"
+ },
+ "partition": {
+ "/var": {
+ "max_size": "100G",
+ "percentage": 10,
+ "size": "1G"
+ }
+ }
+ }
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+                  ...
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: update host config properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "os_config": {
+ ...
+ }
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: delete host config
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /state:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "state": "INSTALLING",
+ "percentage": 0.5,
+ "severity": "INFO",
+ "message": "-----some--message-----",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: get host state
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "state": "INSTALLING"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "cluster_id" : 1,
+ "host_id": 10
+ "state": "SUCCESSFUL",
+ "percentage": 1,
+ "severity": "INFO",
+ "message": "-----some--message-----",
+ "updated_at": "---timestamp---"
+ }
+
+ OR
+
+ {
+ "cluster_id" : 1,
+ "host_id": 10
+ "state": "ERROR",
+ "percentage": 0.7,
+ "severity": "ERROR",
+ "message": "---some-error-message---",
+ "updated_at": "---timestamp---"
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host state properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /network:
+ get:
+ body:
+ application/json:
+ schema: |
+ [
+ {
+ "interface": "eth0",
+ "ip": "10.172.20.91",
+ "subnet_id": 1,
+ "is_mgmt": "False",
+ "is_promiscuous": "False"
+ },
+ {
+ "interface": "eth1",
+ "ip": "10.172.20.110",
+ "subnet_id": 1,
+ "is_mgmt": "False",
+ "is_promiscuous": "False"
+ }
+ ]
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "eth0": {
+ "id": 1,
+ "interface": "eth0",
+ "ip": "192.168.10.1",
+ "is_mgmt": true,
+ "is_promiscuous": false,
+                  "subnet_id": 1
+ },
+ "eth1": {
+ "id": 2,
+ "interface": "eth1",
+ "ip": "10.12.123.1",
+ "is_promiscuous": true,
+                  "subnet_id": 2
+                },
+                ...
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Lists network info for a specified host
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "interface": "eth0",
+ "ip": "10.145.89.152",
+ "subnet_id": 1,
+ "is_mgmt": "True",
+ "is_promiscuous": "False"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "interface": "eth3",
+ "ip": "12.140.10.1",
+ "is_promiscuous": true,
+ "is_mgmt": false,
+                "subnet_id": 3
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: Creates an interface config entry
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{interface}:
+ get:
+ description: list host network information
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "interface": "eth1",
+ "ip": "10.145.89.155",
+ "subnet_id": 1,
+ "is_mgmt": "True",
+ "is_promiscuous": "False"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "interface": "eth3",
+ "ip": "12.140.10.2",
+ "is_promiscuous": true,
+ "is_mgmt": false,
+                "subnet_id": 4
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: set host network properties
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "id": 3,
+ "interface": "eth3",
+ "ip": "12.140.10.1",
+                "is_promiscuous": true,
+ "is_mgmt": false,
+ "subnet_id": 3
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+ "message": " Host with id 'some_id' cannot be found!"
+ }
+ description: delete a host network
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
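+    # Attaching interfaces as described above is shown in the bundled example
+    # script later in this change; a minimal sketch with illustrative host
+    # ids, addresses and subnet ids:
+    #
+    #   client.add_host_network(1, 'eth0', '10.145.89.200', 1, is_mgmt=True)
+    #   client.add_host_network(1, 'eth1', '192.168.100.200', 2,
+    #                           is_promiscuous=True)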
+/proxy/{path}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ [
+ {
+ "created_at": "2014-10-19 10:50:04",
+ "updated_at": "2014-10-19 10:50:04",
+ "email": "admin@huawei.com",
+ "is_admin": true,
+ "active": true,
+ "id": 1
+ }
+ ]
+ queryParameters:
+ URL:
+ example: http://10.145.88.211/api/proxy/users
+ description: proxy get request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ post:
+ body:
+ application/json:
+ schema: |
+ {
+ "url": "http://10.145.88.211/api/proxy/subnets"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "subnet": "10.145.86.0/23",
+ "created_at": "2014-10-19 11:25:33",
+ "updated_at": "2014-10-19 11:25:33",
+ "name": "10.145.86.0/23",
+ "id": 3
+ }
+ description: proxy post request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "url": "http://10.145.88.211/api/proxy/subnets/3"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "subnet": "10.145.84.0/23",
+ "created_at": "2014-10-19 11:25:33",
+ "updated_at": "2014-10-19 11:29:08",
+ "name": "10.145.84.0/23",
+ "id": 3
+ }
+ description: proxy put request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ patch:
+ body:
+ application/json:
+ schema: |
+ {
+ "url": "http://10.145.88.211/api/proxy/subnets/3"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.42",
+ "created_at": "2014-10-19 11:31:40",
+ "updated_at": "2014-10-19 11:33:46",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "private"
+ },
+ "id": 3
+ }
+ description: proxy patch request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "172.29.8.42",
+ "created_at": "2014-10-19 11:31:40",
+ "updated_at": "2014-10-19 11:33:46",
+ "state": "initialized",
+ "filters": "",
+ "credentials": {
+ "version": "2c",
+ "community": "private"
+ },
+ "id": 3
+ }
+ queryParameters:
+ URL:
+ example: http://10.145.88.211/api/proxy/switches/3
+ description: proxy delete request
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/host/networks:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "eth1": {
+ "ip": "192.168.100.155",
+ "created_at": "2015-04-17 14:55:55",
+ "is_promiscuous": true,
+ "updated_at": "2015-04-17 14:55:55",
+ "netmask": "255.255.254.0",
+ "is_mgmt": false,
+ "interface": "eth1",
+ "id": 1
+ },
+ "eth0": {
+ "ip": "10.145.89.155",
+ "created_at": "2015-04-17 14:55:55",
+ "is_promiscuous": false,
+ "updated_at": "2015-04-17 14:55:55",
+ "netmask": "255.255.254.0",
+ "is_mgmt": true,
+ "interface": "eth0",
+ "id": 2
+ }
+ }
+ description: List all host networks
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ /{host_network_id}:
+ get:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "192.168.100.155",
+ "created_at": "2015-04-17 14:55:55",
+ "is_promiscuous": true,
+                "updated_at": "2015-04-17 14:55:55",
+ "netmask": "255.255.254.0",
+ "is_mgmt": false,
+ "interface": "eth1",
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                "message": "Cannot find the record in table HostNetwork: {'id': <host_network_id>}"
+ }
+      description: Lists a specific host network's info
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+/host-networks/{host_network_id}:
+ put:
+ body:
+ application/json:
+ schema: |
+ {
+ "interface": "eth0",
+ "ip": "10.145.88.10"
+ }
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "ip": "192.168.100.159",
+ "created_at": "2015-04-17 14:55:55",
+ "is_promiscuous": true,
+                "updated_at": "2015-04-17 14:55:55",
+ "netmask": "255.255.254.0",
+ "is_mgmt": false,
+ "interface": "eth1",
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                "message": "Cannot find the record in table HostNetwork: {'id': <host_network_id>}"
+ }
+      description: Updates a specific host network's info.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+ delete:
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+                "ip": "10.145.89.155",
+                "created_at": "2015-04-17 15:44:54",
+                "is_promiscuous": false,
+                "updated_at": "2015-04-17 15:44:54",
+                "netmask": "255.255.254.0",
+                "is_mgmt": false,
+ "interface": "eth0",
+ "id": 1
+ }
+ 404:
+ body:
+ application/json:
+ example: |
+ {
+                "message": "Cannot find the record in table HostNetwork: {'id': <host_network_id>}"
+ }
+ description: Delete a host network.
+ headers:
+ Access-token:
+ displayName: X-Auth-Header
+ required: true
+ example: $1$fCD2zLIa$hikkNkqDe0qAXgKHDzw0E0
+
+
+
diff --git a/compass-deck/api/auth_handler.py b/compass-deck/api/auth_handler.py
new file mode 100644
index 0000000..3c22ebb
--- /dev/null
+++ b/compass-deck/api/auth_handler.py
@@ -0,0 +1,49 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from itsdangerous import BadData
+import logging
+import sys
+
+from compass.api import app
+from compass.api import exception_handler
+from compass.api import login_manager
+
+from compass.db.api import user as user_api
+from compass.db.api.user import UserWrapper
+
+
+def authenticate_user(email, password, **kwargs):
+ """Authenticate a user by email and password."""
+ user = user_api.get_user_object(
+ email, **kwargs
+ )
+ user.authenticate(password)
+ return user
+
+
+@login_manager.token_loader
+def load_user_from_token(token):
+ return user_api.get_user_object_from_token(token)
+
+
+@login_manager.header_loader
+def load_user_from_header(header):
+ """Return a user object from token."""
+ return user_api.get_user_object_from_token(header)
+
+
+@login_manager.user_loader
+def load_user(token):
+ return user_api.get_user_object_from_token(token)
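+
+
+# A minimal sketch (names hedged) of the flow these loaders support: a view
+# authenticates once, and flask-login then resolves the user from the token
+# on later requests via the loaders above.
+#
+#   user = authenticate_user('admin@huawei.com', 'admin')
+#   token = user.get_auth_token()  # assumption: UserWrapper provides this
+#   # Subsequent requests carry the token in the auth header and are
+#   # resolved by load_user_from_header / load_user_from_token.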
diff --git a/compass-deck/api/exception_handler.py b/compass-deck/api/exception_handler.py
new file mode 100644
index 0000000..67c780e
--- /dev/null
+++ b/compass-deck/api/exception_handler.py
@@ -0,0 +1,92 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Exceptions for RESTful API."""
+import logging
+import simplejson as json
+import traceback
+
+from compass.api import app
+from compass.api import utils
+
+
+class HTTPException(Exception):
+ def __init__(self, message, status_code):
+ super(HTTPException, self).__init__(message)
+ self.traceback = traceback.format_exc()
+ self.status_code = status_code
+
+ def to_dict(self):
+ return {'message': str(self)}
+
+
+class ItemNotFound(HTTPException):
+ """Define the exception for referring non-existing object."""
+ def __init__(self, message):
+ super(ItemNotFound, self).__init__(message, 410)
+
+
+class BadRequest(HTTPException):
+ """Define the exception for invalid/missing parameters.
+
+ User making a request in invalid state cannot be processed.
+ """
+ def __init__(self, message):
+ super(BadRequest, self).__init__(message, 400)
+
+
+class Unauthorized(HTTPException):
+ """Define the exception for invalid user login."""
+ def __init__(self, message):
+ super(Unauthorized, self).__init__(message, 401)
+
+
+class UserDisabled(HTTPException):
+ """Define the exception for disabled users."""
+ def __init__(self, message):
+ super(UserDisabled, self).__init__(message, 403)
+
+
+class Forbidden(HTTPException):
+ """Define the exception for invalid permissions."""
+ def __init__(self, message):
+ super(Forbidden, self).__init__(message, 403)
+
+
+class BadMethod(HTTPException):
+ """Define the exception for invoking unsupported methods."""
+ def __init__(self, message):
+ super(BadMethod, self).__init__(message, 405)
+
+
+class ConflictObject(HTTPException):
+ """Define the exception for creating an existing object."""
+ def __init__(self, message):
+ super(ConflictObject, self).__init__(message, 409)
+
+
+@app.errorhandler(Exception)
+def handle_exception(error):
+ if hasattr(error, 'to_dict'):
+ response = error.to_dict()
+ else:
+ response = {'message': str(error)}
+ if app.debug and hasattr(error, 'traceback'):
+ response['traceback'] = error.traceback
+
+ status_code = 400
+ if hasattr(error, 'status_code'):
+ status_code = error.status_code
+
+ return utils.make_json_response(status_code, response)
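+
+
+# Illustration: raising one of the classes above anywhere under this app is
+# turned into a JSON error response by handle_exception, e.g.
+#
+#   raise ItemNotFound("Cluster with id 'some_id' cannot be found!")
+#
+# yields status 410 with body {"message": "Cluster with id 'some_id' cannot
+# be found!"}, plus a "traceback" field when app.debug is enabled.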
diff --git a/compass-deck/api/utils.py b/compass-deck/api/utils.py
new file mode 100644
index 0000000..87977cd
--- /dev/null
+++ b/compass-deck/api/utils.py
@@ -0,0 +1,35 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utils for API usage."""
+from flask import make_response
+import simplejson as json
+
+
+def make_json_response(status_code, data):
+    """Wrap json format to the response object."""
+
+ result = json.dumps(data, indent=4) + '\r\n'
+ resp = make_response(result, status_code)
+ resp.headers['Content-type'] = 'application/json'
+ return resp
+
+
+def make_csv_response(status_code, csv_data, fname):
+    """Wrap CSV format to the response object."""
+ fname = '.'.join((fname, 'csv'))
+ resp = make_response(csv_data, status_code)
+ resp.mimetype = 'text/csv'
+ resp.headers['Content-Disposition'] = 'attachment; filename="%s"' % fname
+ return resp
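+
+
+# Usage sketch: both helpers return ready-to-serve Flask responses.
+#
+#   resp = make_json_response(200, {'status': 'OK'})
+#   resp = make_csv_response(200, 'id,name\r\n1,host_01', 'hosts')
+#
+# make_csv_response appends the '.csv' suffix itself, so callers pass a bare
+# file name.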
diff --git a/compass-deck/api/v1/__init__.py b/compass-deck/api/v1/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/api/v1/__init__.py
diff --git a/compass-deck/api/v1/api.py b/compass-deck/api/v1/api.py
new file mode 100644
index 0000000..9dbc548
--- /dev/null
+++ b/compass-deck/api/v1/api.py
@@ -0,0 +1,248 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define all the RestfulAPI entry points."""
+import logging
+import simplejson as json
+
+from flask import Blueprint
+from flask import request
+
+from flask.ext.restful import Resource
+
+from compass.api.exception import BadRequest
+from compass.api.exception import Forbidden
+from compass.api.exception import ItemNotFound
+from compass.api.exception import Unauthorized
+from compass.api.restfulAPI import CompassApi
+from compass.api import utils
+
+from compass.db import db_api
+from compass.db.exception import InvalidParameter
+from compass.db.exception import RecordNotExists
+
+
+v1_app = Blueprint('v1_app', __name__)
+api = CompassApi(v1_app)
+PREFIX = '/v1.0'
+
+
+@v1_app.route('/users', methods=['GET'])
+def list_users():
+ """List details of all users filtered by user email and admin role."""
+
+ emails = request.args.getlist('email')
+ is_admin = request.args.get('admin')
+ filters = {}
+
+ if emails:
+ filters['email'] = emails
+
+ if is_admin is not None:
+ if is_admin == 'true':
+ filters['is_admin'] = True
+ elif is_admin == 'false':
+ filters['is_admin'] = False
+
+ users_list = db_api.user.list_users(filters)
+
+ return utils.make_json_response(200, users_list)
+
+
+class User(Resource):
+ ENDPOINT = PREFIX + '/users'
+
+ def get(self, user_id):
+ """Get user's information for the specified ID."""
+ try:
+ user_data = db_api.user.get_user(user_id)
+ logging.debug("user_data is===>%s", user_data)
+
+ except RecordNotExists as ex:
+ error_msg = ex.message
+ raise ItemNotFound(error_msg)
+
+ return utils.make_json_response(200, user_data)
+
+
+class Adapter(Resource):
+ ENDPOINT = PREFIX + "/adapters"
+
+ def get(self, adapter_id):
+ """Get information for a specified adapter."""
+
+ try:
+ adapter_info = db_api.adapter.get_adapter(adapter_id)
+ except RecordNotExists as ex:
+ error_msg = ex.message
+ raise ItemNotFound(error_msg)
+
+ return utils.make_json_response(200, adapter_info)
+
+
+@v1_app.route('/adapters', methods=['GET'])
+def list_adapters():
+ """List details of all adapters filtered by the adapter name(s)."""
+
+ names = request.args.getlist('name')
+ filters = {}
+ if names:
+ filters['name'] = names
+
+ adapters_list = db_api.adapter.list_adapters(filters)
+ return utils.make_json_response(200, adapters_list)
+
+
+@v1_app.route('/adapters/<int:adapter_id>/config-schema', methods=['GET'])
+def get_adapter_config_schema(adapter_id):
+ """Get the config schema for a specified adapter."""
+
+ os_id = request.args.get("os-id", type=int)
+
+ try:
+ schema = db_api.adapter.get_adapter_config_schema(adapter_id, os_id)
+ except RecordNotExists as ex:
+ raise ItemNotFound(ex.message)
+
+ return utils.make_json_response(200, schema)
+
+
+@v1_app.route('/adapters/<int:adapter_id>/roles', methods=['GET'])
+def get_adapter_roles(adapter_id):
+ """Get roles for a specified adapter."""
+
+ try:
+ roles = db_api.adapter.get_adapter(adapter_id, True)
+ except RecordNotExists as ex:
+ raise ItemNotFound(ex.message)
+
+ return utils.make_json_response(200, roles)
+
+
+class Cluster(Resource):
+ def get(self, cluster_id):
+ """Get information for a specified cluster."""
+
+ try:
+ cluster_info = db_api.cluster.get_cluster(cluster_id)
+
+ except RecordNotExists as ex:
+ error_msg = ex.message
+ raise ItemNotFound(error_msg)
+
+ return utils.make_json_response(200, cluster_info)
+
+
+@v1_app.route('/clusters/<int:cluster_id>/config', methods=['PUT', 'PATCH'])
+def add_cluster_config(cluster_id):
+ """Update the config information for a specified cluster."""
+ config = json.loads(request.data)
+ if not config:
+ raise BadRequest("Config cannot be None!")
+
+ root_elems = ['os_config', 'package_config']
+ if len(config.keys()) != 1 or config.keys()[0] not in root_elems:
+        error_msg = ("Config root elements must be either "
+                     "'os_config' or 'package_config'")
+ raise BadRequest(error_msg)
+
+ result = None
+ is_patch_method = request.method == 'PATCH'
+ try:
+ if "os_config" in config:
+ result = db_api.cluster\
+ .update_cluster_config(cluster_id,
+ 'os_config',
+ config,
+ patch=is_patch_method)
+ elif "package_config" in config:
+ result = db_api.cluster\
+ .update_cluster_config(cluster_id,
+ 'package_config', config,
+ patch=is_patch_method)
+
+ except InvalidParameter as ex:
+ raise BadRequest(ex.message)
+
+ except RecordNotExists as ex:
+ raise ItemNotFound(ex.message)
+
+ return utils.make_json_response(200, result)
+
+
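+# A hedged sketch of exercising the endpoint above with the requests library
+# (the URL prefix and cluster id are illustrative, not fixed by this module):
+#
+#   import json
+#   import requests
+#   requests.patch('http://localhost/api/v1.0/clusters/1/config',
+#                  data=json.dumps({'package_config': {}}))
+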
+api.add_resource(User,
+ '/users',
+ '/users/<int:user_id>')
+api.add_resource(Adapter,
+ '/adapters',
+ '/adapters/<int:adapter_id>')
+api.add_resource(Cluster,
+ '/clusters',
+ '/clusters/<int:cluster_id>')
+
+
+@v1_app.errorhandler(ItemNotFound)
+def handle_not_exist(error, failed_objs=None):
+ """Handler of ItemNotFound Exception."""
+
+ message = {'type': 'itemNotFound',
+ 'message': error.message}
+
+ if failed_objs and isinstance(failed_objs, dict):
+ message.update(failed_objs)
+
+ return utils.make_json_response(404, message)
+
+
+@v1_app.errorhandler(Unauthorized)
+def handle_invalid_user(error, failed_objs=None):
+ """Handler of Unauthorized Exception."""
+
+    message = {'type': 'unauthorized',
+ 'message': error.message}
+
+ if failed_objs and isinstance(failed_objs, dict):
+ message.update(failed_objs)
+
+ return utils.make_json_response(401, message)
+
+
+@v1_app.errorhandler(Forbidden)
+def handle_no_permission(error, failed_objs=None):
+ """Handler of Forbidden Exception."""
+
+ message = {'type': 'Forbidden',
+ 'message': error.message}
+
+ if failed_objs and isinstance(failed_objs, dict):
+ message.update(failed_objs)
+
+ return utils.make_json_response(403, message)
+
+
+@v1_app.errorhandler(BadRequest)
+def handle_bad_request(error, failed_objs=None):
+ """Handler of badRequest Exception."""
+
+ message = {'type': 'badRequest',
+ 'message': error.message}
+
+ if failed_objs and isinstance(failed_objs, dict):
+ message.update(failed_objs)
+
+ return utils.make_json_response(400, message)
+
+
+if __name__ == '__main__':
+ v1_app.run(debug=True)
diff --git a/compass-deck/apiclient/__init__.py b/compass-deck/apiclient/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/apiclient/__init__.py
diff --git a/compass-deck/apiclient/example.py b/compass-deck/apiclient/example.py
new file mode 100755
index 0000000..4c01b98
--- /dev/null
+++ b/compass-deck/apiclient/example.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Example code to deploy a cluster by compass client api."""
+import os
+import re
+import sys
+import time
+
+# from compass.apiclient.restful import Client
+from restful import Client
+
+COMPASS_SERVER_URL = 'http://localhost/api'
+COMPASS_LOGIN_EMAIL = 'admin@huawei.com'
+COMPASS_LOGIN_PASSWORD = 'admin'
+SWITCH_IP = '172.29.8.40'
+SWITCH_SNMP_VERSION = '2c'
+SWITCH_SNMP_COMMUNITY = 'public'
+CLUSTER_NAME = 'test_cluster'
+HOST_NAME_PREFIX = 'host'
+SERVICE_USERNAME = 'service'
+SERVICE_PASSWORD = 'service'
+CONSOLE_USERNAME = 'console'
+CONSOLE_PASSWORD = 'console'
+HA_VIP = ''
+
+MANAGEMENT_IP_START = '10.145.88.130'
+MANAGEMENT_IP_END = '10.145.88.254'
+MANAGEMENT_IP_GATEWAY = '10.145.88.1'
+MANAGEMENT_NETMASK = '255.255.255.0'
+MANAGEMENT_NIC = 'eth0'
+MANAGEMENT_PROMISC = 0
+TENANT_IP_START = '192.168.10.130'
+TENANT_IP_END = '192.168.10.255'
+TENANT_IP_GATEWAY = '192.168.10.1'
+TENANT_NETMASK = '255.255.255.0'
+TENANT_NIC = 'eth0'
+TENANT_PROMISC = 0
+PUBLIC_IP_START = '12.234.32.130'
+PUBLIC_IP_END = '12.234.32.255'
+PUBLIC_IP_GATEWAY = '12.234.32.1'
+PUBLIC_NETMASK = '255.255.255.0'
+PUBLIC_NIC = 'eth1'
+PUBLIC_PROMISC = 1
+STORAGE_IP_START = '172.16.100.130'
+STORAGE_IP_END = '172.16.100.255'
+STORAGE_NETMASK = '255.255.255.0'
+STORAGE_IP_GATEWAY = '172.16.100.1'
+STORAGE_NIC = 'eth0'
+STORAGE_PROMISC = 0
+HOME_PERCENTAGE = 5
+TMP_PERCENTAGE = 5
+VAR_PERCENTAGE = 10
+HOST_OS = 'CentOS-6.5-x86_64'
+
+
+PRESET_VALUES = {
+ 'LANGUAGE': 'EN',
+ 'TIMEZONE': 'GMT',
+ 'HTTPS_PROXY': 'http://10.145.89.100:3128',
+ 'NO_PROXY': ['127.0.0.1'],
+ 'DOMAIN': 'ods.com',
+ 'NAMESERVERS': ['10.145.89.100'],
+ 'NTP_SERVER': '10.145.89.100',
+ 'GATEWAY': '10.145.88.1',
+ 'PROXY': 'http://10.145.89.100:3128',
+ 'OS_NAME_PATTERN': 'CentOS.*',
+ 'ADAPTER_NAME': 'openstack_icehouse',
+ 'FLAVOR_PATTERN': 'allinone.*',
+ 'ROLES_LIST': ['allinone-compute'],
+ 'MACHINES_TO_ADD': ['00:0c:29:a7:ea:4b'],
+ 'BUILD_TIMEOUT': 60,
+ 'SEARCH_PATH': ['ods.com'],
+ 'SERVER_USERNAME': 'root',
+ 'SERVER_PASSWORD': 'root'
+}
+for v in PRESET_VALUES:
+    if v in os.environ.keys():
+        PRESET_VALUES[v] = os.environ.get(v)
+        print (v + '=' + PRESET_VALUES[v] + " is set by env variables")
+    else:
+        print (v + '=' + str(PRESET_VALUES[v]) + " uses the default value")
+
+# instantiate a client
+client = Client(COMPASS_SERVER_URL)
+
+# login
+status, response = client.login(COMPASS_LOGIN_EMAIL, COMPASS_LOGIN_PASSWORD)
+print '============================================================'
+print 'login status: %s response: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+# list all switches
+status, response = client.list_switches()
+print '============================================================='
+print 'get all switches status: %s response: %s' % (status, response)
+
+# add a switch
+status, response = client.add_switch(
+ SWITCH_IP,
+ SWITCH_SNMP_VERSION,
+ SWITCH_SNMP_COMMUNITY
+)
+print '============================================'
+print 'adding a switch..status: %s, response: %s' % (status, response)
+
+# if switch already exists, get one from all switches
+switch = None
+if status < 400:
+ switch = response
+else:
+ status, response = client.list_switches()
+ print '========================================='
+ print 'list switches status %s response %s' % (status, response)
+ if status >= 400:
+ sys.exit(1)
+ for switch_ in response:
+ if switch_['ip'] == SWITCH_IP:
+ switch = switch_
+ break
+
+switch_id = switch['id']
+switch_ip = switch['ip']
+print '======================'
+print 'switch has been set as %s' % switch_ip
+
+# wait till switch state becomes under_monitoring
+while switch['state'] != 'under_monitoring':
+ print 'waiting for state to become under_monitoring'
+ client.poll_switch(switch_id)
+ status, resp = client.get_switch(switch_id)
+ print '====================================='
+ print 'poll switch status %s response %s' % (status, resp)
+ switch = resp
+ print 'switch is in state: %s' % switch['state']
+ time.sleep(5)
+
+print '========================================='
+print 'switch state now is %s' % (switch['state'])
+
+# create a machine list
+machine_macs = {}
+machines = {}
+for machine in PRESET_VALUES['MACHINES_TO_ADD']:
+ status, response = client.list_machines(mac=machine)
+ print '============================================'
+ print 'list machines status %s response %s' % (status, response)
+ if status >= 400:
+ sys.exit(1)
+ if status == 200 and response != []:
+ machine_id = response[0]['id']
+ machine_macs[machine_id] = response[0]['mac']
+ machines = response
+
+print '================================='
+print 'found machines are : %s' % machines
+
+machines_to_add = PRESET_VALUES['MACHINES_TO_ADD']
+if set(machine_macs.values()) != set(machines_to_add):
+ print 'only found macs %s while expected are %s' % (
+ machine_macs.values(), machines_to_add)
+ sys.exit(1)
+
+# list all adapters
+status, response = client.list_adapters()
+print '==============================='
+print 'all adapters are: %s' % response
+if status >= 400:
+ sys.exit(1)
+
+adapters = response
+adapter_id = None
+os_id = None
+flavor_id = None
+adapter_name = PRESET_VALUES['ADAPTER_NAME']
+os_pattern = re.compile(PRESET_VALUES['OS_NAME_PATTERN'])
+flavor_pattern = re.compile(PRESET_VALUES['FLAVOR_PATTERN'])
+for adapter in adapters:
+ if adapter_name == adapter['name']:
+ adapter_id = adapter['id']
+ for supported_os in adapter['supported_oses']:
+ if os_pattern.match(supported_os['name']):
+ os_id = supported_os['id']
+ break
+ for flavor in adapter['flavors']:
+ if flavor_pattern.match(flavor['name']):
+ flavor_id = flavor['id']
+ if adapter_id and os_id and flavor_id:
+ break
+
+print '======================================================='
+print 'using adapter %s os %s flavor %s to deploy cluster' % (
+ adapter_id, os_id, flavor_id
+)
+
+# add a cluster
+status, response = client.add_cluster(
+ CLUSTER_NAME,
+ adapter_id,
+ os_id,
+ flavor_id
+)
+print '==============================================================='
+print 'add cluster %s status %s: %s' % (CLUSTER_NAME, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.list_clusters(name=CLUSTER_NAME)
+print '================================================================'
+print 'list clusters %s status %s: %s' % (CLUSTER_NAME, status, response)
+if status >= 400:
+ sys.exit(1)
+
+cluster = response[0]
+cluster_id = cluster['id']
+
+print '=================='
+print 'cluster is %s' % cluster
+
+# Add hosts to the cluster
+machines_dict = {}
+machine_id_list = []
+for machine in machines:
+ id_mapping = {}
+ id_mapping['machine_id'] = machine['id']
+ machine_id_list.append(id_mapping)
+
+machines_dict['machines'] = machine_id_list
+
+status, response = client.add_hosts_to_cluster(
+ cluster_id, machines_dict
+)
+print '==================================='
+print 'add hosts %s to cluster status %s response %s' % (
+ machines_dict, status, response)
+if status >= 400:
+ sys.exit(1)
+
+# Add two subnets
+subnet_1 = '10.145.89.0/24'
+subnet_2 = '192.168.100.0/24'
+
+status, response = client.add_subnet(subnet_1)
+print '=================='
+print 'add subnet %s status %s: %s' % (subnet_1, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.add_subnet(subnet_2)
+print '=================='
+print 'add subnet %s status %s: %s' % (subnet_2, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, subnet1 = client.list_subnets(subnet=subnet_1)
+print '==========================================================='
+print 'list subnet %s status %s: %s' % (subnet_1, status, subnet1)
+if status >= 400:
+ sys.exit(1)
+
+status, subnet2 = client.list_subnets(subnet=subnet_2)
+print '==========================================================='
+print 'list subnet %s status %s: %s' % (subnet_2, status, subnet2)
+if status >= 400:
+ sys.exit(1)
+
+subnet1_id = subnet1[0]['id']
+subnet2_id = subnet2[0]['id']
+print '========================'
+print 'subnet1 has id: %s, subnet is %s' % (subnet1_id, subnet1)
+print 'subnet2 has id: %s, subnet is %s' % (subnet2_id, subnet2)
+
+# Add host network
+status, response = client.list_cluster_hosts(cluster_id)
+print '================================================'
+print 'list cluster hosts status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+host = response[0]
+host_id = host['id']
+print '=================='
+print 'host is: %s' % host
+
+status, response = client.add_host_network(
+ host_id,
+ 'eth0',
+ '10.145.89.200',
+ subnet1_id,
+ is_mgmt=True
+)
+print '======================='
+print 'add eth0 network status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.add_host_network(
+ host_id,
+ 'eth1',
+ '192.168.100.200',
+ subnet2_id,
+ is_promiscuous=True
+)
+print '======================='
+print 'add eth1 network status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+# Update os config to cluster
+cluster_os_config = {
+ 'general': {
+ 'language': PRESET_VALUES['LANGUAGE'],
+ 'timezone': PRESET_VALUES['TIMEZONE'],
+ 'http_proxy': PRESET_VALUES['PROXY'],
+ 'https_proxy': PRESET_VALUES['HTTPS_PROXY'],
+ 'no_proxy': PRESET_VALUES['NO_PROXY'],
+ 'ntp_server': PRESET_VALUES['NTP_SERVER'],
+ 'dns_servers': PRESET_VALUES['NAMESERVERS'],
+ 'domain': PRESET_VALUES['DOMAIN'],
+ 'search_path': PRESET_VALUES['SEARCH_PATH'],
+ 'default_gateway': PRESET_VALUES['GATEWAY']
+ },
+ 'server_credentials': {
+ 'username': PRESET_VALUES['SERVER_USERNAME'],
+ 'password': PRESET_VALUES['SERVER_PASSWORD']
+ },
+ 'partition': {
+ '/var': {
+ 'percentage': VAR_PERCENTAGE,
+ },
+ '/home': {
+ 'percentage': HOME_PERCENTAGE,
+ }
+ }
+}
+
+
+cluster_package_config = {
+ 'security': {
+ 'service_credentials': {
+ 'image': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'compute': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'dashboard': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'identity': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'metering': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'rabbitmq': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'volume': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'mysql': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ }
+ },
+ 'console_credentials': {
+ 'admin': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'compute': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'dashboard': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'image': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'metering': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'network': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'object-store': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'volume': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ }
+ }
+ },
+ 'network_mapping': {
+ 'management': MANAGEMENT_NIC,
+ 'tenant': TENANT_NIC,
+ 'storage': STORAGE_NIC,
+ 'public': PUBLIC_NIC
+ }
+}
+
+status, response = client.update_cluster_config(
+ cluster_id,
+ cluster_os_config,
+ cluster_package_config
+)
+
+print '======================================='
+print 'cluster %s update status %s: %s' % (
+ cluster_id, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.update_cluster_host(
+ cluster_id, host_id, roles=PRESET_VALUES['ROLES_LIST'])
+print '================================================='
+print 'update cluster host %s/%s status %s: %s' % (
+ cluster_id, host_id, status, response)
+if status >= 400:
+ sys.exit(1)
+
+# Review and deploy
+status, response = client.review_cluster(
+ cluster_id, review={'hosts': [host_id]})
+print '======================================='
+print 'reviewing cluster status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.deploy_cluster(
+ cluster_id, deploy={'hosts': [host_id]})
+print '======================================='
+print 'deploy cluster status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
diff --git a/compass-deck/apiclient/restful.py b/compass-deck/apiclient/restful.py
new file mode 100644
index 0000000..bb82922
--- /dev/null
+++ b/compass-deck/apiclient/restful.py
@@ -0,0 +1,1102 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass api client library.
+"""
+
+import json
+import logging
+import requests
+
+
+class Client(object):
+ """compass restful api wrapper"""
+
+ def __init__(self, url, headers=None, proxies=None, stream=None):
+ logging.info('create api client %s', url)
+ self.url_ = url
+ self.session_ = requests.Session()
+
+ if headers:
+ self.session_.headers.update(headers)
+ self.session_.headers.update({
+ 'Accept': 'application/json'
+ })
+
+ if proxies is not None:
+ self.session_.proxies = proxies
+
+ if stream is not None:
+ self.session_.stream = stream
+
+ def __del__(self):
+ self.session_.close()
+
+ @classmethod
+ def _get_response(cls, resp):
+ response_object = {}
+ try:
+ response_object = resp.json()
+ except Exception as error:
+ logging.error('failed to load object from %s: %s',
+ resp.url, resp.content)
+ logging.exception(error)
+ response_object['status'] = 'Json Parsing Failed'
+ response_object['message'] = resp.content
+
+ return resp.status_code, response_object
+
+ def _get(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('get %s with data %s', url, data)
+ if data:
+ resp = self.session_.get(url, params=data)
+ else:
+ resp = self.session_.get(url)
+
+ return self._get_response(resp)
+
+ def _post(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('post %s with data %s', url, data)
+ if data:
+ resp = self.session_.post(url, json.dumps(data))
+ else:
+ resp = self.session_.post(url)
+
+ return self._get_response(resp)
+
+ def _put(self, req_url, data=None):
+ """encapsulate put method."""
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('put %s with data %s', url, data)
+ if data:
+ resp = self.session_.put(url, json.dumps(data))
+ else:
+ resp = self.session_.put(url)
+
+ return self._get_response(resp)
+
+ def _patch(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('patch %s with data %s', url, data)
+ if data:
+ resp = self.session_.patch(url, json.dumps(data))
+ else:
+ resp = self.session_.patch(url)
+
+ return self._get_response(resp)
+
+ def _delete(self, req_url):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('delete %s', url)
+ return self._get_response(self.session_.delete(url))
+
+ def login(self, email, password):
+ credential = {}
+ credential['email'] = email
+ credential['password'] = password
+ return self._post('/users/login', data=credential)
+
+ def get_token(self, email, password):
+ credential = {}
+ credential['email'] = email
+ credential['password'] = password
+ status, resp = self._post('/users/token', data=credential)
+ if status < 400:
+ self.session_.headers.update({'X-Auth-Token': resp['token']})
+ return status, resp
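+
+    # Illustrative usage (the host and credentials are placeholders, not
+    # defaults shipped with compass): every method below returns a
+    # (status, resp_dict) tuple, and get_token() stores the auth token on
+    # the session for subsequent calls.
+    #   client = Client('http://127.0.0.1/api')
+    #   status, resp = client.get_token('admin@huawei.com', 'admin')
+    #   if status < 400:
+    #       status, clusters = client.list_clusters()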
+
+ def get_users(self):
+ users = self._get('/users')
+ return users
+
+ def list_switches(
+ self,
+ switch_ips=None,
+ switch_ip_networks=None):
+ """list switches."""
+ params = {}
+ if switch_ips:
+ params['switchIp'] = switch_ips
+
+ if switch_ip_networks:
+ params['switchIpNetwork'] = switch_ip_networks
+
+ switchlist = self._get('/switches', data=params)
+ return switchlist
+
+ def get_switch(self, switch_id):
+ return self._get('/switches/%s' % switch_id)
+
+ def add_switch(
+ self,
+ switch_ip,
+ version=None,
+ community=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['ip'] = switch_ip
+ data['credentials'] = {}
+ if version:
+ data['credentials']['version'] = version
+
+ if community:
+ data['credentials']['community'] = community
+
+ return self._post('/switches', data=data)
+
+ def update_switch(self, switch_id, state='initialized',
+                      version='2c', community='public', raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ data['credentials'] = {}
+ if version:
+ data['credentials']['version'] = version
+
+ if community:
+ data['credentials']['community'] = community
+
+ if state:
+ data['state'] = state
+
+ return self._put('/switches/%s' % switch_id, data=data)
+
+ def delete_switch(self, switch_id):
+ return self._delete('/switches/%s' % switch_id)
+
+ def list_switch_machines(self, switch_id, port=None, vlans=None,
+ tag=None, location=None):
+ data = {}
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/switches/%s/machines' % switch_id, data=data)
+
+ def get_switch_machine(self, switch_id, machine_id):
+ return self._get('/switches/%s/machines/%s' % (switch_id, machine_id))
+
+ def list_switch_machines_hosts(self, switch_id, port=None, vlans=None,
+ mac=None, tag=None, location=None,
+ os_name=None, os_id=None):
+
+ data = {}
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/switches/%s/machines-hosts' % switch_id, data=data)
+
+ def add_switch_machine(self, switch_id, mac=None, port=None,
+ vlans=None, ipmi_credentials=None,
+ tag=None, location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if mac:
+ data['mac'] = mac
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._post('/switches/%s/machines' % switch_id, data=data)
+
+ def update_switch_machine(self, switch_id, machine_id, port=None,
+ vlans=None, ipmi_credentials=None, tag=None,
+ location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._put('/switches/%s/machines/%s' %
+ (switch_id, machine_id), data=data)
+
+ def delete_switch_machine(self, switch_id, machine_id):
+ return self._delete('/switches/%s/machines/%s' %
+ (switch_id, machine_id))
+
+    # TODO: verify the switch action helpers below against the API.
+ def poll_switch(self, switch_id):
+ data = {}
+ data['find_machines'] = None
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def add_group_switch_machines(self, switch_id, group_machine_ids):
+ data = {}
+ data['add_machines'] = group_machine_ids
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def remove_group_switch_machines(self, switch_id, group_machine_ids):
+ data = {}
+ data['remove_machines'] = group_machine_ids
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def update_group_switch_machines(self, switch_id, group_machines):
+ data = {}
+ data['set_machines'] = group_machines
+ return self._post('/switches/%s/action' % switch_id, data=data)
+    # end of switch action helpers
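+    # Example payloads for the '/switches/<id>/action' endpoint, read off
+    # the helper bodies above (ids are placeholders):
+    #   poll_switch(1)                       -> {'find_machines': None}
+    #   add_group_switch_machines(1, [2, 3]) -> {'add_machines': [2, 3]}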
+
+ def list_switchmachines(self, switch_ip_int=None, port=None, vlans=None,
+ mac=None, tag=None, location=None):
+ data = {}
+ if switch_ip_int:
+ data['switch_ip_int'] = switch_ip_int
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/switch-machines', data=data)
+
+ def list_switchmachines_hosts(self, switch_ip_int=None, port=None,
+ vlans=None, mac=None, tag=None,
+ location=None, os_name=None, os_id=None):
+
+ data = {}
+ if switch_ip_int:
+ data['switch_ip_int'] = switch_ip_int
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/switches-machines-hosts', data=data)
+
+ def show_switchmachine(self, switchmachine_id):
+ return self._get('/switch-machines/%s' % switchmachine_id)
+
+ def update_switchmachine(self, switchmachine_id,
+ port=None, vlans=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ return self._put('/switch-machines/%s' % switchmachine_id, data=data)
+
+ def patch_switchmachine(self, switchmachine_id,
+ vlans=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ elif vlans:
+ data['vlans'] = vlans
+
+ return self._patch('/switch-machines/%s' % switchmachine_id, data=data)
+
+ def delete_switchmachine(self, switchmachine_id):
+ return self._delete('/switch-machines/%s' % switchmachine_id)
+
+ def list_machines(self, mac=None, tag=None, location=None):
+ data = {}
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/machines', data=data)
+
+ def get_machine(self, machine_id):
+        return self._get('/machines/%s' % machine_id)
+
+ def update_machine(self, machine_id, ipmi_credentials=None, tag=None,
+ location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._put('/machines/%s' % machine_id, data=data)
+
+ def patch_machine(self, machine_id, ipmi_credentials=None,
+ tag=None, location=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._patch('/machines/%s' % machine_id, data=data)
+
+ def delete_machine(self, machine_id):
+        return self._delete('/machines/%s' % machine_id)
+
+ def list_subnets(self, subnet=None, name=None):
+ data = {}
+ if subnet:
+ data['subnet'] = subnet
+
+ if name:
+ data['name'] = name
+
+ return self._get('/subnets', data=data)
+
+ def get_subnet(self, subnet_id):
+ return self._get('/subnets/%s' % subnet_id)
+
+ def add_subnet(self, subnet, name=None, raw_data=None):
+ data = {}
+ data['subnet'] = subnet
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if name:
+ data['name'] = name
+
+ return self._post('/subnets', data=data)
+
+ def update_subnet(self, subnet_id, subnet=None,
+ name=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if subnet:
+ data['subnet'] = subnet
+
+ if name:
+ data['name'] = name
+ return self._put('/subnets/%s' % subnet_id, data=data)
+
+ def delete_subnet(self, subnet_id):
+ return self._delete('/subnets/%s' % subnet_id)
+
+ def list_adapters(self, name=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ return self._get('/adapters', data=data)
+
+ def get_adapter(self, adapter_id):
+ return self._get('/adapters/%s' % adapter_id)
+
+ def get_adapter_roles(self, adapter_id):
+ return self._get('/adapters/%s/roles' % adapter_id)
+
+ def get_adapter_metadata(self, adapter_id):
+ return self._get('/adapters/%s/metadata' % adapter_id)
+
+ def get_os_metadata(self, os_id):
+ return self._get('/oses/%s/metadata' % os_id)
+
+ def list_clusters(self, name=None, os_name=None,
+ owner=None,
+ adapter_id=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if owner:
+ data['owner'] = owner
+
+ if adapter_id:
+ data['adapter_id'] = adapter_id
+
+ return self._get('/clusters', data=data)
+
+ def get_cluster(self, cluster_id):
+ return self._get('/clusters/%s' % cluster_id)
+
+ def add_cluster(self, name, adapter_id, os_id,
+ flavor_id=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if flavor_id:
+ data['flavor_id'] = flavor_id
+ data['name'] = name
+ data['adapter_id'] = adapter_id
+ data['os_id'] = os_id
+
+ return self._post('/clusters', data=data)
+
+ def update_cluster(self, cluster_id, name=None,
+ reinstall_distributed_system=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_distributed_system:
+ data['reinstall_distributed_system'] = (
+ reinstall_distributed_system
+ )
+ return self._put('/clusters/%s' % cluster_id, data=data)
+
+ def delete_cluster(self, cluster_id):
+ return self._delete('/clusters/%s' % cluster_id)
+
+ def get_cluster_config(self, cluster_id):
+ return self._get('/clusters/%s/config' % cluster_id)
+
+ def get_cluster_metadata(self, cluster_id):
+ return self._get('/clusters/%s/metadata' % cluster_id)
+
+ def update_cluster_config(self, cluster_id, os_config=None,
+ package_config=None, config_step=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ if config_step:
+ data['config_step'] = config_step
+
+ return self._put('/clusters/%s/config' % cluster_id, data=data)
+
+ def patch_cluster_config(self, cluster_id, os_config=None,
+ package_config=None, config_step=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ if config_step:
+ data['config_step'] = config_step
+
+ return self._patch('/clusters/%s/config' % cluster_id, data=data)
+
+ def delete_cluster_config(self, cluster_id):
+ return self._delete('/clusters/%s/config' % cluster_id)
+
+    # TODO: verify the cluster action helpers below against the API.
+ def add_hosts_to_cluster(self, cluster_id, hosts):
+ data = {}
+ data['add_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def set_hosts_in_cluster(self, cluster_id, hosts):
+ data = {}
+ data['set_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def remove_hosts_from_cluster(self, cluster_id, hosts):
+ data = {}
+ data['remove_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+    def review_cluster(self, cluster_id, review=None):
+        data = {}
+        data['review'] = review or {}
+        return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+    def deploy_cluster(self, cluster_id, deploy=None):
+        data = {}
+        data['deploy'] = deploy or {}
+        return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+    def redeploy_cluster(self, cluster_id, deploy=None):
+        data = {}
+        data['redeploy'] = deploy or {}
+        return self._post('/clusters/%s/action' % cluster_id, data=data)
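+
+    # Sketch of the '/clusters/<id>/action' payloads, matching the example
+    # client script in this patch (ids are placeholders):
+    #   add_hosts_to_cluster(1, [1, 2])          -> {'add_hosts': [1, 2]}
+    #   review_cluster(1, review={'hosts': [1]}) -> {'review': {'hosts': [1]}}
+    #   deploy_cluster(1, deploy={'hosts': [1]}) -> {'deploy': {'hosts': [1]}}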
+
+ def get_cluster_state(self, cluster_id):
+ return self._get('/clusters/%s/state' % cluster_id)
+
+ def list_cluster_hosts(self, cluster_id):
+ return self._get('/clusters/%s/hosts' % cluster_id)
+
+ def list_clusterhosts(self):
+ return self._get('/clusterhosts')
+
+ def get_cluster_host(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s' % (cluster_id, host_id))
+
+ def get_clusterhost(self, clusterhost_id):
+ return self._get('/clusterhosts/%s' % clusterhost_id)
+
+ def add_cluster_host(self, cluster_id, machine_id=None, name=None,
+ reinstall_os=None, raw_data=None):
+ data = {}
+ data['machine_id'] = machine_id
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_os:
+ data['reinstall_os'] = reinstall_os
+
+ return self._post('/clusters/%s/hosts' % cluster_id, data=data)
+
+ def delete_cluster_host(self, cluster_id, host_id):
+ return self._delete('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id))
+
+ def delete_clusterhost(self, clusterhost_id):
+ return self._delete('/clusterhosts/%s' % clusterhost_id)
+
+ def get_cluster_host_config(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id))
+
+ def get_clusterhost_config(self, clusterhost_id):
+ return self._get('/clusterhosts/%s/config' % clusterhost_id)
+
+ def update_cluster_host_config(self, cluster_id, host_id,
+ os_config=None,
+ package_config=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._put('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id), data=data)
+
+ def update_clusterhost_config(self, clusterhost_id, os_config=None,
+ package_config=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._put('/clusterhosts/%s/config' % clusterhost_id,
+ data=data)
+
+ def patch_cluster_host_config(self, cluster_id, host_id,
+ os_config=None,
+ package_config=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._patch('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id), data=data)
+
+ def patch_clusterhost_config(self, clusterhost_id, os_config=None,
+ package_config=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+        return self._patch('/clusterhosts/%s/config' % clusterhost_id,
+                           data=data)
+
+ def delete_cluster_host_config(self, cluster_id, host_id):
+ return self._delete('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id))
+
+ def delete_clusterhost_config(self, clusterhost_id):
+ return self._delete('/clusterhosts/%s/config' % clusterhost_id)
+
+ def get_cluster_host_state(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s/state' %
+ (cluster_id, host_id))
+
+ def update_cluster_host(self, cluster_id, host_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id), data=data)
+
+ def update_clusterhost(self, clusterhost_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusterhosts/%s' % clusterhost_id, data=data)
+
+ def patch_cluster_host(self, cluster_id, host_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._patch('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id), data=data)
+
+ def patch_clusterhost(self, clusterhost_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._patch('/clusterhosts/%s' % clusterhost_id, data=data)
+
+ def get_clusterhost_state(self, clusterhost_id):
+ return self._get('/clusterhosts/%s/state' % clusterhost_id)
+
+ def update_cluster_host_state(self, cluster_id, host_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+ return self._put('/clusters/%s/hosts/%s/state' % (cluster_id, host_id),
+ data=data)
+
+ def update_clusterhost_state(self, clusterhost_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+ return self._put('/clusterhosts/%s/state' % clusterhost_id, data=data)
+
+ def list_hosts(self, name=None, os_name=None, owner=None, mac=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if owner:
+ data['owner'] = owner
+
+ if mac:
+ data['mac'] = mac
+
+ return self._get('/hosts', data=data)
+
+ def get_host(self, host_id):
+ return self._get('/hosts/%s' % host_id)
+
+ def list_machines_or_hosts(self, mac=None, tag=None,
+ location=None, os_name=None,
+ os_id=None):
+ data = {}
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/machines-hosts', data=data)
+
+ def get_machine_or_host(self, host_id):
+ return self._get('/machines-hosts/%s' % host_id)
+
+ def update_host(self, host_id, name=None,
+ reinstall_os=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_os:
+ data['reinstall_os'] = reinstall_os
+
+ return self._put('/hosts/%s' % host_id, data=data)
+
+ def delete_host(self, host_id):
+ return self._delete('/hosts/%s' % host_id)
+
+ def get_host_clusters(self, host_id):
+ return self._get('/hosts/%s/clusters' % host_id)
+
+ def get_host_config(self, host_id):
+ return self._get('/hosts/%s/config' % host_id)
+
+ def update_host_config(self, host_id, os_config, raw_data=None):
+ data = {}
+ data['os_config'] = os_config
+ if raw_data:
+ data.update(raw_data)
+
+ return self._put('/hosts/%s/config' % host_id, data=data)
+
+ def patch_host_config(self, host_id, os_config, raw_data=None):
+ data = {}
+ data['os_config'] = os_config
+ if raw_data:
+ data.update(raw_data)
+
+ return self._patch('/hosts/%s/config' % host_id, data=data)
+
+ def delete_host_config(self, host_id):
+ return self._delete('/hosts/%s/config' % host_id)
+
+ def list_host_networks(self, host_id, interface=None, ip=None,
+ subnet=None, is_mgmt=None, is_promiscuous=None):
+ data = {}
+ if interface:
+ data['interface'] = interface
+
+ if ip:
+ data['ip'] = ip
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._get('/hosts/%s/networks' % host_id, data=data)
+
+ def list_all_host_networks(self, interface=None, ip=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None):
+ data = {}
+ if interface:
+ data['interface'] = interface
+
+ if ip:
+ data['ip'] = ip
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._get('/host-networks', data=data)
+
+ def get_host_network(self, host_id, host_network_id):
+ return self._get('/hosts/%s/networks/%s' %
+ (host_id, host_network_id))
+
+ def get_network_for_all_hosts(self, host_network_id):
+ return self._get('/host-networks/%s' % host_network_id)
+
+ def add_host_network(self, host_id, interface, ip, subnet_id,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ data['interface'] = interface
+ data['ip'] = ip
+ data['subnet_id'] = subnet_id
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._post('/hosts/%s/networks' % host_id, data=data)
+
+ def update_host_network(self, host_id, host_network_id,
+ ip=None, subnet_id=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ip:
+ data['ip'] = ip
+
+ if subnet_id:
+ data['subnet_id'] = subnet_id
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._put('/hosts/%s/networks/%s' %
+ (host_id, host_network_id), data=data)
+
+ def update_hostnetwork(self, host_network_id, ip=None,
+ subnet_id=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ip:
+ data['ip'] = ip
+
+ if subnet_id:
+ data['subnet_id'] = subnet_id
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._put('/host-networks/%s' % host_network_id,
+ data=data)
+
+ def delete_host_network(self, host_id, host_network_id):
+        return self._delete('/hosts/%s/networks/%s' %
+                            (host_id, host_network_id))
+
+ def delete_hostnetwork(self, host_network_id):
+ return self._delete('/host-networks/%s' % host_network_id)
+
+ def get_host_state(self, host_id):
+ return self._get('/hosts/%s/state' % host_id)
+
+ def update_host_state(self, host_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+        return self._put('/hosts/%s/state' % host_id, data=data)
+
+ def poweron_host(self, host_id):
+ data = {}
+ data['poweron'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def poweroff_host(self, host_id):
+ data = {}
+ data['poweroff'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def reset_host(self, host_id):
+ data = {}
+ data['reset'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def clusterhost_ready(self, clusterhost_name):
+ data = {}
+ data['ready'] = True
+
+ return self._post('/clusterhosts/%s/state_internal' %
+ clusterhost_name, data=data)
diff --git a/compass-deck/apiclient/v1/__init__.py b/compass-deck/apiclient/v1/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-deck/apiclient/v1/__init__.py
diff --git a/compass-deck/apiclient/v1/example.py b/compass-deck/apiclient/v1/example.py
new file mode 100755
index 0000000..6f7a7f7
--- /dev/null
+++ b/compass-deck/apiclient/v1/example.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Example code to deploy a cluster by compass client api."""
+import os
+import re
+import requests
+import sys
+import time
+
+from compass.apiclient.restful import Client
+
+
+COMPASS_SERVER_URL = 'http://127.0.0.1/api'
+SWITCH_IP = '10.145.81.220'
+SWITCH_SNMP_VERSION = 'v2c'
+SWITCH_SNMP_COMMUNITY = 'public'
+# MACHINES_TO_ADD = ['00:11:20:30:40:01']
+CLUSTER_NAME = 'cluster2'
+HOST_NAME_PREFIX = 'host'
+SERVER_USERNAME = 'root'
+SERVER_PASSWORD = 'root'
+SERVICE_USERNAME = 'service'
+SERVICE_PASSWORD = 'service'
+CONSOLE_USERNAME = 'console'
+CONSOLE_PASSWORD = 'console'
+HA_VIP = ''
+# NAMESERVERS = '192.168.10.6'
+SEARCH_PATH = 'ods.com'
+# GATEWAY = '192.168.10.6'
+# PROXY = 'http://192.168.10.6:3128'
+# NTP_SERVER = '192.168.10.6'
+MANAGEMENT_IP_START = '192.168.10.130'
+MANAGEMENT_IP_END = '192.168.10.254'
+MANAGEMENT_IP_GATEWAY = '192.168.10.1'
+MANAGEMENT_NETMASK = '255.255.255.0'
+MANAGEMENT_NIC = 'eth0'
+MANAGEMENT_PROMISC = 0
+TENANT_IP_START = '192.168.10.100'
+TENANT_IP_END = '192.168.10.255'
+TENANT_IP_GATEWAY = '192.168.10.1'
+TENANT_NETMASK = '255.255.255.0'
+TENANT_NIC = 'eth0'
+TENANT_PROMISC = 0
+PUBLIC_IP_START = '12.234.32.100'
+PUBLIC_IP_END = '12.234.32.255'
+PUBLIC_IP_GATEWAY = '12.234.32.1'
+PUBLIC_NETMASK = '255.255.255.0'
+PUBLIC_NIC = 'eth1'
+PUBLIC_PROMISC = 1
+STORAGE_IP_START = '172.16.100.100'
+STORAGE_IP_END = '172.16.100.255'
+STORAGE_NETMASK = '255.255.255.0'
+STORAGE_IP_GATEWAY = '172.16.100.1'
+STORAGE_NIC = 'eth0'
+STORAGE_PROMISC = 0
+HOME_PERCENTAGE = 5
+TMP_PERCENTAGE = 5
+VAR_PERCENTAGE = 10
+# ROLES_LIST = [['os-dashboard']]
+
+PRESET_VALUES = {
+ 'NAMESERVERS': '192.168.10.1',
+ 'NTP_SERVER': '192.168.10.1',
+ 'GATEWAY': '192.168.10.1',
+ 'PROXY': 'http://192.168.10.1:3128',
+ 'ROLES_LIST': 'os-dashboard',
+ 'MACHINES_TO_ADD': '00:11:20:30:40:01',
+ 'BUILD_TIMEOUT': 60
+}
+for v in PRESET_VALUES:
+    if v in os.environ:
+        PRESET_VALUES[v] = os.environ[v]
+        print (v + '=' + PRESET_VALUES[v] + " is set by env variables")
+    else:
+        print (v + '=' + str(PRESET_VALUES[v]) + " uses the default value")
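+
+# The overrides above are plain environment variables. List-valued settings
+# (MACHINES_TO_ADD, ROLES_LIST) are whitespace-separated because the script
+# later splits them with str.split(); hypothetical values:
+#   export MACHINES_TO_ADD='00:11:20:30:40:01 00:11:20:30:40:02'
+#   export BUILD_TIMEOUT=90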
+
+# get apiclient object.
+client = Client(COMPASS_SERVER_URL)
+
+
+# get all switches.
+status, resp = client.get_switches()
+print 'get all switches status: %s resp: %s' % (status, resp)
+
+# add a switch.
+status, resp = client.add_switch(
+ SWITCH_IP, version=SWITCH_SNMP_VERSION,
+ community=SWITCH_SNMP_COMMUNITY)
+
+print 'add a switch status: %s resp: %s' % (status, resp)
+
+if status < 400:
+ switch = resp['switch']
+else:
+ status, resp = client.get_switches()
+ print 'get all switches status: %s resp: %s' % (status, resp)
+ switch = None
+ for switch in resp['switches']:
+ if switch['ip'] == SWITCH_IP:
+ break
+
+switch_id = switch['id']
+switch_ip = switch['ip']
+
+
+# if the switch is not under_monitoring, wait for the poll switch task
+# to update the switch information and change the switch state.
+while switch['state'] != 'under_monitoring':
+    print 'waiting for the switch to enter under_monitoring'
+ status, resp = client.get_switch(switch_id)
+ print 'get switch %s status: %s, resp: %s' % (switch_id, status, resp)
+ switch = resp['switch']
+ time.sleep(10)
+
+
+# get machines connected to the switch.
+status, resp = client.get_machines(switch_id=switch_id)
+print 'get all machines under switch %s status: %s, resp: %s' % (
+ switch_id, status, resp)
+machines = {}
+MACHINES_TO_ADD = PRESET_VALUES['MACHINES_TO_ADD'].split()
+for machine in resp['machines']:
+ mac = machine['mac']
+ if mac in MACHINES_TO_ADD:
+ machines[machine['id']] = mac
+
+print 'machine to add: %s' % machines
+
+if set(machines.values()) != set(MACHINES_TO_ADD):
+ print 'only found macs %s while expected are %s' % (
+ machines.values(), MACHINES_TO_ADD)
+ sys.exit(1)
+
+
+# get adapters.
+status, resp = client.get_adapters()
+print 'get all adapters status: %s, resp: %s' % (status, resp)
+adapter_ids = []
+for adapter in resp['adapters']:
+ adapter_ids.append(adapter['id'])
+
+adapter_id = adapter_ids[0]
+print 'adapter for deploying a cluster: %s' % adapter_id
+
+
+# add a cluster.
+status, resp = client.add_cluster(
+ cluster_name=CLUSTER_NAME, adapter_id=adapter_id)
+print 'add cluster %s status: %s, resp: %s' % (CLUSTER_NAME, status, resp)
+cluster = resp['cluster']
+cluster_id = cluster['id']
+
+# add hosts to the cluster.
+status, resp = client.add_hosts(
+ cluster_id=cluster_id,
+ machine_ids=machines.keys())
+print 'add hosts to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+host_ids = []
+for host in resp['cluster_hosts']:
+ host_ids.append(host['id'])
+
+print 'added hosts: %s' % host_ids
+
+
+# set cluster security
+status, resp = client.set_security(
+ cluster_id, server_username=SERVER_USERNAME,
+ server_password=SERVER_PASSWORD,
+ service_username=SERVICE_USERNAME,
+ service_password=SERVICE_PASSWORD,
+ console_username=CONSOLE_USERNAME,
+ console_password=CONSOLE_PASSWORD)
+print 'set security config to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+
+
+# set cluster networking
+status, resp = client.set_networking(
+ cluster_id,
+ nameservers=PRESET_VALUES["NAMESERVERS"],
+ search_path=SEARCH_PATH,
+ gateway=PRESET_VALUES["GATEWAY"],
+ proxy=PRESET_VALUES["PROXY"],
+ ntp_server=PRESET_VALUES["NTP_SERVER"],
+ ha_vip=HA_VIP,
+ management_ip_start=MANAGEMENT_IP_START,
+ management_ip_end=MANAGEMENT_IP_END,
+ management_netmask=MANAGEMENT_NETMASK,
+ management_nic=MANAGEMENT_NIC,
+ management_gateway=MANAGEMENT_IP_GATEWAY,
+ management_promisc=MANAGEMENT_PROMISC,
+ tenant_ip_start=TENANT_IP_START,
+ tenant_ip_end=TENANT_IP_END,
+ tenant_netmask=TENANT_NETMASK,
+ tenant_nic=TENANT_NIC,
+ tenant_gateway=TENANT_IP_GATEWAY,
+ tenant_promisc=TENANT_PROMISC,
+ public_ip_start=PUBLIC_IP_START,
+ public_ip_end=PUBLIC_IP_END,
+ public_netmask=PUBLIC_NETMASK,
+ public_nic=PUBLIC_NIC,
+ public_gateway=PUBLIC_IP_GATEWAY,
+ public_promisc=PUBLIC_PROMISC,
+ storage_ip_start=STORAGE_IP_START,
+ storage_ip_end=STORAGE_IP_END,
+ storage_netmask=STORAGE_NETMASK,
+ storage_nic=STORAGE_NIC,
+ storage_gateway=STORAGE_IP_GATEWAY,
+ storage_promisc=STORAGE_PROMISC)
+print 'set networking config to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+
+
+# set partiton of each host in cluster
+status, resp = client.set_partition(
+ cluster_id,
+ home_percentage=HOME_PERCENTAGE,
+ tmp_percentage=TMP_PERCENTAGE,
+ var_percentage=VAR_PERCENTAGE)
+print 'set partition config to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+
+
+# set each host config in cluster.
+ROLES_LIST = [PRESET_VALUES['ROLES_LIST'].split()]
+for host_id in host_ids:
+ if ROLES_LIST:
+ roles = ROLES_LIST.pop(0)
+ else:
+ roles = []
+ status, resp = client.update_host_config(
+ host_id, hostname='%s%s' % (HOST_NAME_PREFIX, host_id),
+ roles=roles)
+ print 'set roles to host %s status: %s, resp: %s' % (
+ host_id, status, resp)
+
+
+# deploy cluster.
+status, resp = client.deploy_hosts(cluster_id)
+print 'deploy cluster %s status: %s, resp: %s' % (cluster_id, status, resp)
+
+
+# get installing progress.
+BUILD_TIMEOUT = float(PRESET_VALUES['BUILD_TIMEOUT'])
+timeout = time.time() + BUILD_TIMEOUT * 60
+while True:
+ status, resp = client.get_cluster_installing_progress(cluster_id)
+ print 'get cluster %s installing progress status: %s, resp: %s' % (
+ cluster_id, status, resp)
+ progress = resp['progress']
+ if (
+ progress['state'] not in ['UNINITIALIZED', 'INSTALLING'] or
+ progress['percentage'] >= 1.0
+ ):
+ break
+ if (
+ time.time() > timeout
+ ):
+ raise Exception("Timeout! The system is not ready in time.")
+
+ for host_id in host_ids:
+ status, resp = client.get_host_installing_progress(host_id)
+ print 'get host %s installing progress status: %s, resp: %s' % (
+ host_id, status, resp)
+
+ time.sleep(60)
+
+
+status, resp = client.get_dashboard_links(cluster_id)
+print 'get cluster %s dashboardlinks status: %s, resp: %s' % (
+ cluster_id, status, resp)
+dashboardlinks = resp['dashboardlinks']
+if not dashboardlinks.keys():
+ raise Exception("Dashboard link is not found!")
+for x in dashboardlinks.keys():
+ if x in ("os-dashboard", "os-controller"):
+ dashboardurl = dashboardlinks.get(x)
+ if dashboardurl is None:
+ raise Exception("No dashboard link is found")
+ r = requests.get(dashboardurl, verify=False)
+ r.raise_for_status()
+ match = re.search(
+ r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)', r.text)
+ if match:
+ print 'dashboard login page can be downloaded'
+ break
+ print (
+ 'dashboard login page failed to be downloaded\n'
+ 'the context is:\n%s\n') % r.text
+ raise Exception("os-dashboard is not properly installed!")
diff --git a/compass-deck/apiclient/v1/restful.py b/compass-deck/apiclient/v1/restful.py
new file mode 100644
index 0000000..3fb235c
--- /dev/null
+++ b/compass-deck/apiclient/v1/restful.py
@@ -0,0 +1,655 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass api client library.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import json
+import logging
+import requests
+
+
+class Client(object):
+ """wrapper for compass restful api.
+
+ .. note::
+ Every api client method returns (status as int, resp as dict).
+ If the api succeeds, the status is 2xx, the resp includes
+ {'status': 'OK'} and other keys depend on method.
+ If the api fails, the status is 4xx, the resp includes {
+ 'status': '...', 'message': '...'}
+ """
+
+ def __init__(self, url, headers=None, proxies=None, stream=None):
+ """Restful api client initialization.
+
+ :param url: url to the compass web service.
+ :type url: str.
+ :param headers: http header sent in each restful request.
+        :type headers: dict of header name (str) to header value (str).
+ :param proxies: the proxy address for each protocol.
+ :type proxies: dict of protocol (str) to proxy url (str).
+        :param stream: whether the restful response should be streamed.
+ :type stream: bool.
+ """
+ self.url_ = url
+ self.session_ = requests.Session()
+ if headers:
+ self.session_.headers = headers
+
+ if proxies is not None:
+ self.session_.proxies = proxies
+
+ if stream is not None:
+ self.session_.stream = stream
+
+ def __del__(self):
+ self.session_.close()
+
+ @classmethod
+ def _get_response(cls, resp):
+ """decapsulate the resp to status code and python formatted data."""
+ resp_obj = {}
+ try:
+ resp_obj = resp.json()
+ except Exception as error:
+ logging.error('failed to load object from %s: %s',
+ resp.url, resp.content)
+ logging.exception(error)
+ resp_obj['status'] = 'Json Parsing Failure'
+ resp_obj['message'] = resp.content
+
+ return resp.status_code, resp_obj
+
+ def _get(self, relative_url, params=None):
+ """encapsulate get method."""
+ url = '%s%s' % (self.url_, relative_url)
+ if params:
+ resp = self.session_.get(url, params=params)
+ else:
+ resp = self.session_.get(url)
+
+ return self._get_response(resp)
+
+ def _post(self, relative_url, data=None):
+ """encapsulate post method."""
+ url = '%s%s' % (self.url_, relative_url)
+ if data:
+ resp = self.session_.post(url, json.dumps(data))
+ else:
+ resp = self.session_.post(url)
+
+ return self._get_response(resp)
+
+ def _put(self, relative_url, data=None):
+ """encapsulate put method."""
+ url = '%s%s' % (self.url_, relative_url)
+ if data:
+ resp = self.session_.put(url, json.dumps(data))
+ else:
+ resp = self.session_.put(url)
+
+ return self._get_response(resp)
+
+ def _delete(self, relative_url):
+ """encapsulate delete method."""
+ url = '%s%s' % (self.url_, relative_url)
+ return self._get_response(self.session_.delete(url))
+
+ def get_switches(self, switch_ips=None, switch_networks=None, limit=None):
+ """List details for switches.
+
+ .. note::
+            The switches can be filtered by switch_ips, switch_networks and
+ limit. These params can be None or missing. If the param is None
+ or missing, that filter will be ignored.
+
+ :param switch_ips: Filter switch(es) with IP(s).
+ :type switch_ips: list of str. Each is as 'xxx.xxx.xxx.xxx'.
+        :param switch_networks: Filter switch(es) with network(s).
+ :type switch_networks: list of str. Each is as 'xxx.xxx.xxx.xxx/xx'.
+        :param limit: the maximum number of switches to return.
+ :type limit: int. 0 means unlimited.
+ """
+ params = {}
+ if switch_ips:
+ params['switchIp'] = switch_ips
+
+ if switch_networks:
+ params['switchIpNetwork'] = switch_networks
+
+ if limit:
+ params['limit'] = limit
+ return self._get('/switches', params=params)
+
+ def get_switch(self, switch_id):
+ """Lists details for a specified switch.
+
+ :param switch_id: switch id.
+ :type switch_id: int.
+ """
+ return self._get('/switches/%s' % switch_id)
+
+ def add_switch(self, switch_ip, version=None, community=None,
+ username=None, password=None, raw_data=None):
+ """Create a switch with specified details.
+
+ .. note::
+ It will trigger switch polling if successful. During
+ the polling, MAC address of the devices connected to the
+ switch will be learned by SNMP or SSH.
+
+ :param switch_ip: the switch IP address.
+ :type switch_ip: str, as xxx.xxx.xxx.xxx.
+ :param version: SNMP version when using SNMP to poll switch.
+ :type version: str, one in ['v1', 'v2c', 'v3']
+ :param community: SNMP community when using SNMP to poll switch.
+ :type community: str, usually 'public'.
+ :param username: SSH username when using SSH to poll switch.
+ :type username: str.
+ :param password: SSH password when using SSH to poll switch.
+ :type password: str.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['switch'] = {}
+ data['switch']['ip'] = switch_ip
+ data['switch']['credential'] = {}
+ if version:
+ data['switch']['credential']['version'] = version
+
+ if community:
+ data['switch']['credential']['community'] = community
+
+ if username:
+ data['switch']['credential']['username'] = username
+
+ if password:
+ data['switch']['credential']['password'] = password
+
+ return self._post('/switches', data=data)
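+
+    # The request body built by add_switch() is nested, e.g. for SNMP
+    # polling (values are illustrative):
+    #   {'switch': {'ip': '10.145.81.220',
+    #               'credential': {'version': 'v2c', 'community': 'public'}}}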
+
+ def update_switch(self, switch_id, ip_addr=None,
+ version=None, community=None,
+ username=None, password=None,
+ raw_data=None):
+ """Updates a switch with specified details.
+
+ .. note::
+ It will trigger switch polling if successful. During
+ the polling, MAC address of the devices connected to the
+ switch will be learned by SNMP or SSH.
+
+ :param switch_id: switch id
+ :type switch_id: int.
+ :param ip_addr: the switch ip address.
+ :type ip_addr: str, as 'xxx.xxx.xxx.xxx' format.
+ :param version: SNMP version when using SNMP to poll switch.
+ :type version: str, one in ['v1', 'v2c', 'v3'].
+ :param community: SNMP community when using SNMP to poll switch.
+        :type community: str, usually 'public'.
+ :param username: username when using SSH to poll switch.
+ :type username: str.
+ :param password: password when using SSH to poll switch.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['switch'] = {}
+ if ip_addr:
+ data['switch']['ip'] = ip_addr
+
+ data['switch']['credential'] = {}
+ if version:
+ data['switch']['credential']['version'] = version
+
+ if community:
+ data['switch']['credential']['community'] = community
+
+ if username:
+ data['switch']['credential']['username'] = username
+
+ if password:
+ data['switch']['credential']['password'] = password
+
+ return self._put('/switches/%s' % switch_id, data=data)
+
+ def delete_switch(self, switch_id):
+ """Not implemented in api."""
+ return self._delete('/switches/%s' % switch_id)
+
+ def get_machines(self, switch_id=None, vlan_id=None,
+ port=None, limit=None):
+ """Get the details of machines.
+
+ .. note::
+ The machines can be filtered by switch_id, vlan_id, port
+ and limit. These params can be None or missing. If the param
+ is None or missing, the filter will be ignored.
+
+ :param switch_id: Return machine(s) connected to the switch.
+ :type switch_id: int.
+ :param vlan_id: Return machine(s) belonging to the vlan.
+ :type vlan_id: int.
+ :param port: Return machine(s) connect to the port.
+ :type port: int.
+ :param limit: the maximum number of machines will be returned.
+ :type limit: int. 0 means no limit.
+ """
+ params = {}
+ if switch_id:
+ params['switchId'] = switch_id
+
+ if vlan_id:
+ params['vlanId'] = vlan_id
+
+ if port:
+ params['port'] = port
+
+ if limit:
+ params['limit'] = limit
+
+ return self._get('/machines', params=params)
+
+ def get_machine(self, machine_id):
+ """Lists the details for a specified machine.
+
+ :param machine_id: Return machine with the id.
+ :type machine_id: int.
+ """
+ return self._get('/machines/%s' % machine_id)
+
+ def get_clusters(self):
+ """Lists the details for all clusters."""
+ return self._get('/clusters')
+
+ def get_cluster(self, cluster_id):
+ """Lists the details of the specified cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ """
+ return self._get('/clusters/%d' % cluster_id)
+
+ def add_cluster(self, cluster_name, adapter_id, raw_data=None):
+ """Creates a cluster by specified name and given adapter id.
+
+ :param cluster_name: cluster name.
+ :type cluster_name: str.
+ :param adapter_id: adapter id.
+ :type adapter_id: int.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['cluster'] = {}
+ data['cluster']['name'] = cluster_name
+ data['cluster']['adapter_id'] = adapter_id
+ return self._post('/clusters', data=data)
+
+ def add_hosts(self, cluster_id, machine_ids, raw_data=None):
+ """add the specified machine(s) as the host(s) to the cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param machine_ids: machine ids to add to cluster.
+ :type machine_ids: list of int, each is the id of one machine.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['addHosts'] = machine_ids
+ return self._post('/clusters/%d/action' % cluster_id, data=data)
+
+ def remove_hosts(self, cluster_id, host_ids, raw_data=None):
+ """remove the specified host(s) from the cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param host_ids: host ids to remove from cluster.
+ :type host_ids: list of int, each is the id of one host.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['removeHosts'] = host_ids
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def replace_hosts(self, cluster_id, machine_ids, raw_data=None):
+ """replace the cluster hosts with the specified machine(s).
+
+ :param cluster_id: int, The unique identifier of the cluster.
+ :type cluster_id: int.
+ :param machine_ids: the machine ids to replace the hosts in cluster.
+ :type machine_ids: list of int, each is the id of one machine.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['replaceAllHosts'] = machine_ids
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def deploy_hosts(self, cluster_id, raw_data=None):
+ """Deploy the cluster.
+
+ :param cluster_id: The unique identifier of the cluster
+ :type cluster_id: int.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['deploy'] = []
+ return self._post('/clusters/%d/action' % cluster_id, data=data)
+
+ @classmethod
+ def parse_security(cls, kwargs):
+ """parse the arguments to security data."""
+ data = {}
+ for key, value in kwargs.items():
+ if '_' not in key:
+ continue
+ key_name, key_value = key.split('_', 1)
+ data.setdefault(
+ '%s_credentials' % key_name, {})[key_value] = value
+
+ return data
+
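+    # Worked example of parse_security (derived from the loop above):
+    #   {'server_username': 'root', 'server_password': 'root'}
+    #   -> {'server_credentials': {'username': 'root', 'password': 'root'}}
+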
+ def set_security(self, cluster_id, **kwargs):
+ """Update the cluster security configuration.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param <security_name>_username: username of the security name.
+ :type <security_name>_username: str.
+        :param <security_name>_password: password of the security name.
+ :type <security_name>_password: str.
+
+ .. note::
+ security_name should be one of ['server', 'service', 'console'].
+ """
+ data = {}
+ data['security'] = self.parse_security(kwargs)
+ return self._put('/clusters/%d/security' % cluster_id, data=data)
+
+ @classmethod
+ def parse_networking(cls, kwargs):
+ """parse arguments to network data."""
+ data = {}
+ global_keys = [
+ 'nameservers', 'search_path', 'gateway',
+ 'proxy', 'ntp_server', 'ha_vip']
+ for key, value in kwargs.items():
+ if key in global_keys:
+ data.setdefault('global', {})[key] = value
+ else:
+ if '_' not in key:
+ continue
+
+ key_name, key_value = key.split('_', 1)
+ data.setdefault(
+ 'interfaces', {}
+ ).setdefault(
+ key_name, {}
+ )[key_value] = value
+
+ return data
+
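+    # Worked example of parse_networking (derived from the loop above):
+    #   {'nameservers': '192.168.10.1', 'management_nic': 'eth0'}
+    #   -> {'global': {'nameservers': '192.168.10.1'},
+    #       'interfaces': {'management': {'nic': 'eth0'}}}
+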
+ def set_networking(self, cluster_id, **kwargs):
+ """Update the cluster network configuration.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+        :param nameservers: comma separated nameserver ip address.
+ :type nameservers: str.
+        :param search_path: comma separated dns name search path.
+ :type search_path: str.
+ :param gateway: gateway ip address for routing to outside.
+ :type gateway: str.
+ :param proxy: proxy url for downloading packages.
+ :type proxy: str.
+ :param ntp_server: ntp server ip address to sync timestamp.
+ :type ntp_server: str.
+ :param ha_vip: ha vip address to run ha proxy.
+ :type ha_vip: str.
+ :param <interface>_ip_start: start ip address to host's interface.
+ :type <interface>_ip_start: str.
+ :param <interface>_ip_end: end ip address to host's interface.
+ :type <interface>_ip_end: str.
+ :param <interface>_netmask: netmask to host's interface.
+ :type <interface>_netmask: str.
+ :param <interface>_nic: host physical interface name.
+ :type <interface>_nic: str.
+        :param <interface>_promisc: if the interface is in promiscuous mode.
+ :type <interface>_promisc: int, 0 or 1.
+
+ .. note::
+ interface should be one of ['management', 'tenant',
+ 'public', 'storage'].
+ """
+ data = {}
+ data['networking'] = self.parse_networking(kwargs)
+ return self._put('/clusters/%d/networking' % cluster_id, data=data)
+
+ @classmethod
+ def parse_partition(cls, kwargs):
+ """parse arguments to partition data."""
+ data = {}
+ for key, value in kwargs.items():
+ if key.endswith('_percentage'):
+ key_name = key[:-len('_percentage')]
+ data[key_name] = '%s%%' % value
+            elif key.endswith('_mbytes'):
+ key_name = key[:-len('_mbytes')]
+ data[key_name] = str(value)
+
+ return ';'.join([
+ '/%s %s' % (key, value) for key, value in data.items()
+ ])
+
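+    # Worked example of parse_partition (dict ordering is arbitrary):
+    #   {'home_percentage': 5, 'tmp_mbytes': 2048}
+    #   -> '/home 5%;/tmp 2048'  (or the two entries in the other order)
+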
+ def set_partition(self, cluster_id, **kwargs):
+ """Update the cluster partition configuration.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+        :param <partition>_percentage: the partition percentage.
+        :type <partition>_percentage: float between 0 and 100.
+ :param <partition>_mbytes: the partition mbytes.
+ :type <partition>_mbytes: int.
+
+ .. note::
+ partition should be one of ['home', 'var', 'tmp'].
+ """
+ data = {}
+ data['partition'] = self.parse_partition(kwargs)
+ return self._put('/clusters/%s/partition' % cluster_id, data=data)
+
+ def get_hosts(self, hostname=None, clustername=None):
+ """Lists the details of hosts.
+
+ .. note::
+ The hosts can be filtered by hostname, clustername.
+ These params can be None or missing. If the param
+ is None or missing, the filter will be ignored.
+
+ :param hostname: The name of a host.
+ :type hostname: str.
+ :param clustername: The name of a cluster.
+ :type clustername: str.
+ """
+ params = {}
+ if hostname:
+ params['hostname'] = hostname
+
+ if clustername:
+ params['clustername'] = clustername
+
+ return self._get('/clusterhosts', params=params)
+
+ def get_host(self, host_id):
+ """Lists the details for the specified host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ """
+ return self._get('/clusterhosts/%s' % host_id)
+
+ def get_host_config(self, host_id):
+ """Lists the details of the config for the specified host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ """
+ return self._get('/clusterhosts/%s/config' % host_id)
+
+ def update_host_config(self, host_id, hostname=None,
+ roles=None, raw_data=None, **kwargs):
+ """Updates config for the host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ :param hostname: host name.
+ :type hostname: str.
+ :param security_<security>_username: username of the security name.
+ :type security_<security>_username: str.
+        :param security_<security>_password: password of the security name.
+        :type security_<security>_password: str.
+        :param networking_nameservers: comma separated nameserver ip address.
+        :type networking_nameservers: str.
+        :param networking_search_path: comma separated dns name search path.
+ :type networking_search_path: str.
+ :param networking_gateway: gateway ip address for routing to outside.
+ :type networking_gateway: str.
+ :param networking_proxy: proxy url for downloading packages.
+ :type networking_proxy: str.
+ :param networking_ntp_server: ntp server ip address to sync timestamp.
+ :type networking_ntp_server: str.
+ :param networking_<interface>_ip: ip address to host interface.
+ :type networking_<interface>_ip: str.
+ :param networking_<interface>_netmask: netmask to host's interface.
+ :type networking_<interface>_netmask: str.
+ :param networking_<interface>_nic: host physical interface name.
+ :type networking_<interface>_nic: str.
+        :param networking_<interface>_promisc: if the interface is promiscuous.
+ :type networking_<interface>_promisc: int, 0 or 1.
+        :param partition_<partition>_percentage: the partition percentage.
+        :type partition_<partition>_percentage: float between 0 and 100.
+ :param partition_<partition>_mbytes: the partition mbytes.
+ :type partition_<partition>_mbytes: int.
+ :param roles: host assigned roles in the cluster.
+ :type roles: list of str.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if hostname:
+ data['hostname'] = hostname
+
+ sub_kwargs = {}
+ for key, value in kwargs.items():
+ key_name, key_value = key.split('_', 1)
+ sub_kwargs.setdefault(key_name, {})[key_value] = value
+
+ if 'security' in sub_kwargs:
+ data['security'] = self.parse_security(sub_kwargs['security'])
+
+ if 'networking' in sub_kwargs:
+ data['networking'] = self.parse_networking(
+ sub_kwargs['networking'])
+ if 'partition' in sub_kwargs:
+ data['partition'] = self.parse_partition(
+ sub_kwargs['partition'])
+
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusterhosts/%s/config' % host_id, data)
+
+ def delete_from_host_config(self, host_id, delete_key):
+ """Deletes one key in config for the host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ :param delete_key: the key in host config to be deleted.
+ :type delete_key: str.
+ """
+ return self._delete('/clusterhosts/%s/config/%s' % (
+ host_id, delete_key))
+
+ def get_adapters(self, name=None):
+ """Lists details of adapters.
+
+ .. note::
+            the adapters can be filtered by name if name is given and not None.
+
+ :param name: adapter name.
+ :type name: str.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+
+ return self._get('/adapters', params=params)
+
+ def get_adapter(self, adapter_id):
+ """Lists details for the specified adapter.
+
+ :param adapter_id: adapter id.
+ :type adapter_id: int.
+ """
+ return self._get('/adapters/%s' % adapter_id)
+
+ def get_adapter_roles(self, adapter_id):
+ """Lists roles to assign to hosts for the specified adapter.
+
+ :param adapter_id: adapter id.
+ :type adapter_id: int.
+ """
+ return self._get('/adapters/%s/roles' % adapter_id)
+
+ def get_host_installing_progress(self, host_id):
+ """Lists progress details for the specified host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ """
+ return self._get('/clusterhosts/%s/progress' % host_id)
+
+ def get_cluster_installing_progress(self, cluster_id):
+ """Lists progress details for the specified cluster.
+
+ :param cluster_id: cluster id.
+        :type cluster_id: int.
+ """
+
+ return self._get('/clusters/%s/progress' % cluster_id)
+
+ def get_dashboard_links(self, cluster_id):
+ """Lists links for dashboards of deployed cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ """
+ params = {}
+ params['cluster_id'] = cluster_id
+ return self._get('/dashboardlinks', params)
diff --git a/compass-deck/bin/README.md b/compass-deck/bin/README.md
new file mode 100644
index 0000000..7052059
--- /dev/null
+++ b/compass-deck/bin/README.md
@@ -0,0 +1,66 @@
+Compass Binaries and Scripts
+=============================
+
+bin/ contains compass heavy-lifting utility scripts and binaries. These scripts are often called by different components. Some are from core python modules and some are from compass daemon and other services. Most files in `bin/` are placed under `/opt/compass/bin/` after install.sh is complete. Some of them will go to `/usr/bin/` or `/etc/init.d/` as system binaries or services.
+
+### Directories and Files
+
+Below is a walkthrough of all directories and files.
+
+ * ansible_callbacks/ - contains callback scripts for the ansible installer.
+ * playbook_done.py - triggered by ansible when all playbooks are successfully executed.
+ Then the script will call compass API to report ansible "complete" status.
+ * chef/ - utility scripts for chef installer.
+ * addcookbooks.py - upload all chef cookbooks to the chef server.
+ * adddatabags.py - (deprecated) upload all chef databags to the chef server.
+ * addroles.py - upload all chef roles to the chef server.
+ * clean_clients.sh - remove all chef clients on the chef server.
+ * clean_environments.sh - remove all chef environments on the chef server.
+ * clean_nodes.sh - remove all chef nodes on the chef server.
+ * cobbler/ - utility scripts for cobbler installer
+ * remove_systems.sh - remove all systems on the cobbler server.
+ * clean_installation_logs.py - remove all the installation logs.
+ * clean_installers.py - remove all configurations and data from all installers.
+ * client.sh - sample client script to call client.py
+ * client.py - compass python client that calls the API and deploys a cluster based on given configurations.
+ * compass_check.py - binary file that is placed as /usr/bin/compass. This is the main entry point of the compass check CLI.
+ * compassd - (deprecated) old compass daemon file
+ * compass_wsgi.py - compass wsgi module.
+ * csvdeploy.py - script that enables the deployment of clusters from spreadsheets.
+ * delete_clusters.py - script that deletes all given clusters and their underlying hosts.
+ * manage_db.py - utility binary that manages the database.
+ * poll_switch.py - utility script to poll the MAC addresses of machines connected to given switches.
+ * progress_update.py - main script to run as a service to update hosts installing progresses.
+ * query_switch.py - independent script to query a switch.
+ * refresh.sh - refresh compass-db, restart compass services and clean up all installers.
+ * runserver.py - manually run a compass server instance.
+ * switch_virtualenv.py.template - template of switch_virtualenv.py. This script enables switching between python
+ virtual environments.
+
+###Script Location and Calling Modules
+Script name | Location | Called by
+--- | --- | ---
+ansible_callbacks/playbook_done.py | /opt/compass/bin/ansible_callbacks/playbook_done.py | ***ansible-playbook***
+chef/addcookbooks.py | /opt/compass/bin/addcookbooks.py | ***install/chef.sh***
+chef/adddatabags.py (deprecated) | /opt/compass/bin/adddatabags.py | None
+chef/addroles.py | /opt/compass/bin/addroles.py | ***install/chef.sh***
+chef/clean_clients.sh | /opt/compass/bin/clean_clients.sh | ***compass.tasks.clean_package_installer***
+chef/clean_environments.sh | /opt/compass/bin/clean_environments.sh | ***compass.tasks.clean_package_installer***
+chef/clean_nodes.sh | /opt/compass/bin/clean_nodes.sh | ***compass.tasks.clean_package_installer***
+cobbler/remove_systems.sh | /opt/compass/bin/remove_systems.sh | ***compass.tasks.clean_os_installer***
+clean_installation_logs.py | /opt/compass/bin/clean_installation_logs.py | ***bin/refresh.sh***
+clean_installers.py | /opt/compass/bin/clean_installers.py | ***bin/refresh.sh***
+client.sh | /opt/compass/bin/client.sh | sample client
+client.py | /opt/compass/bin/client.py | ***regtest/regtest.sh***
+compass_check.py | /opt/compass/bin/compass_check.py | ***compass check cli***
+compassd(deprecated) | None | None
+compass_wsgi.py | /var/www/compass/compass.wsgi | ***Apache daemon***
+csvdeploy.py | /opt/compass/bin/csvdeploy.py | command-line script
+delete_clusters.py | /opt/compass/bin/delete_clusters.py | command-line script
+manage_db.py | /opt/compass/bin/manage_db.py | ***install/compass.sh*** and command-line script
+poll_switch.py | /opt/compass/bin/poll_switch.py | command-line script
+progress_update.py | /opt/compass/bin/progress_update.py | ***compass-progress-updated daemon***
+query_switch.py | /opt/compass/bin/query_switch.py | command-line script
+refresh.sh | /opt/compass/bin/refresh.sh | command-line script
+runserver.py | /opt/compass/bin/runserver.py | command-line script
+switch_virtualenv.py.template | /opt/compass/bin/switch_virtualenv.py | ***all scripts using this library***
diff --git a/compass-deck/bin/ansible_callbacks/playbook_done.py b/compass-deck/bin/ansible_callbacks/playbook_done.py
new file mode 100755
index 0000000..23d75a9
--- /dev/null
+++ b/compass-deck/bin/ansible_callbacks/playbook_done.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Ansible playbook callback after a playbook run has completed."""
+import logging
+import os
+import simplejson as json
+import sys
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir + '/..')
+
+import switch_virtualenv
+
+from compass.apiclient.restful import Client
+from compass.utils import flags
+
+flags.add('compass_server',
+ help='compass server url',
+ default='http://127.0.0.1/api')
+flags.add('compass_user_email',
+ help='compass user email',
+ default='admin@huawei.com')
+flags.add('compass_user_password',
+ help='compass user password',
+ default='admin')
+
+
+class CallbackModule(object):
+ def __init__(self):
+ self.disabled = False
+ try:
+ self.client = self._get_client()
+ except Exception:
+ self.disabled = True
+ logging.error("No compass server found"
+ "disabling this plugin")
+
+ def _get_client(self):
+ return Client(flags.OPTIONS.compass_server)
+
+ def _login(self, client):
+ """get apiclient token."""
+ status, resp = client.get_token(
+ flags.OPTIONS.compass_user_email,
+ flags.OPTIONS.compass_user_password
+ )
+ logging.info(
+ 'login status: %s, resp: %s',
+ status, resp
+ )
+ if status >= 400:
+ raise Exception(
+ 'failed to login %s with user %s' % (
+ flags.OPTIONS.compass_server,
+ flags.OPTIONS.compass_user_email
+ )
+ )
+ return resp['token']
+
+ def playbook_on_stats(self, stats):
+ hosts = sorted(stats.processed.keys())
+ host_vars = self.playbook.inventory.get_variables(hosts[0])
+ cluster_name = host_vars['cluster_name']
+
+ failures = False
+ unreachable = False
+
+ for host in hosts:
+ summary = stats.summarize(host)
+
+ if summary['failures'] > 0:
+ failures = True
+ if summary['unreachable'] > 0:
+ unreachable = True
+
+ if failures or unreachable:
+ return
+
+ self._login(self.client)
+
+ for host in hosts:
+ clusterhost_name = host + "." + cluster_name
+ self.client.clusterhost_ready(clusterhost_name)
diff --git a/compass-deck/bin/chef/addcookbooks.py b/compass-deck/bin/chef/addcookbooks.py
new file mode 100755
index 0000000..f23dac4
--- /dev/null
+++ b/compass-deck/bin/chef/addcookbooks.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""import cookbooks to chef server."""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('cookbooks_dir',
+ help='chef cookbooks directory',
+ default='/var/chef/cookbooks')
+
+
+def main():
+ """main entry."""
+ flags.init()
+ logsetting.init()
+ cookbooks_dir = flags.OPTIONS.cookbooks_dir
+ logging.info('add cookbooks %s', cookbooks_dir)
+ cmd = "knife cookbook upload --all --cookbook-path %s" % cookbooks_dir
+ status = os.system(cmd)
+ logging.info('run cmd %s returns %s', cmd, status)
+ if status:
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/compass-deck/bin/chef/adddatabags.py b/compass-deck/bin/chef/adddatabags.py
new file mode 100755
index 0000000..ba2d08c
--- /dev/null
+++ b/compass-deck/bin/chef/adddatabags.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""import databags to chef server."""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('databags_dir',
+ help='chef databags directory',
+ default='/var/chef/databags')
+
+
+def main():
+ """main entry."""
+ flags.init()
+ logsetting.init()
+ databags = []
+ databags_dir = flags.OPTIONS.databags_dir
+ for item in os.listdir(databags_dir):
+ databags.append(item)
+
+ for databag in databags:
+ logging.info('add databag %s', databag)
+ cmd = "knife data bag create %s" % databag
+ os.system(cmd)
+ databag_items = []
+ databagitem_dir = os.path.join(databags_dir, databag)
+ for item in os.listdir(databagitem_dir):
+ if item.endswith('.json'):
+ databag_items.append(os.path.join(databagitem_dir, item))
+ else:
+ logging.info('ignore %s in %s', item, databagitem_dir)
+
+ for databag_item in databag_items:
+ logging.info('add databag item %s to databag %s',
+ databag_item, databag)
+ cmd = 'knife data bag from file %s %s' % (databag, databag_item)
+ status = os.system(cmd)
+ logging.info('run cmd %s returns %s', cmd, status)
+ if status:
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/compass-deck/bin/chef/addroles.py b/compass-deck/bin/chef/addroles.py
new file mode 100755
index 0000000..2745506
--- /dev/null
+++ b/compass-deck/bin/chef/addroles.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""script to import roles to chef server."""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('roles_dir',
+ help='chef roles directory',
+ default='/var/chef/roles')
+
+
+def main():
+ """main entry."""
+ flags.init()
+ logsetting.init()
+ rolelist = []
+ roles_dir = flags.OPTIONS.roles_dir
+
+ for item in os.listdir(roles_dir):
+ if item.endswith('.rb') or item.endswith('.json'):
+ rolelist.append(os.path.join(roles_dir, item))
+ else:
+ logging.info('ignore %s in %s', item, roles_dir)
+
+ for role in rolelist:
+ logging.info('add role %s', role)
+ cmd = "knife role from file %s" % role
+ status = os.system(cmd)
+ logging.info('run cmd %s returns %s', cmd, status)
+ if status:
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/compass-deck/bin/chef/clean_clients.sh b/compass-deck/bin/chef/clean_clients.sh
new file mode 100755
index 0000000..7a26bea
--- /dev/null
+++ b/compass-deck/bin/chef/clean_clients.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo "clean chef clients"
+yes | knife client bulk delete '^(?!chef-).*'
+if [[ "$?" != "0" ]]; then
+ echo "failed to clean all clients"
+fi
diff --git a/compass-deck/bin/chef/clean_environments.sh b/compass-deck/bin/chef/clean_environments.sh
new file mode 100755
index 0000000..f9b5052
--- /dev/null
+++ b/compass-deck/bin/chef/clean_environments.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+echo "clean chef environments"
+environments=$(knife environment list)
+for environment in $environments; do
+ if [[ "$environment" != "_default" ]]; then
+ yes | knife environment delete $environment
+ if [[ "$?" != "0" ]]; then
+ echo "failed to delete environment $environment"
+ else
+ echo "environment $environment is deleted"
+ fi
+ fi
+done
diff --git a/compass-deck/bin/chef/clean_nodes.sh b/compass-deck/bin/chef/clean_nodes.sh
new file mode 100755
index 0000000..8224b82
--- /dev/null
+++ b/compass-deck/bin/chef/clean_nodes.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo "clean chef nodes"
+yes | knife node bulk delete '.*'
+if [[ "$?" != "0" ]]; then
+ echo "failed to clean all nodes"
+fi
diff --git a/compass-deck/bin/clean_installation_logs.py b/compass-deck/bin/clean_installation_logs.py
new file mode 100755
index 0000000..0ae20f1
--- /dev/null
+++ b/compass-deck/bin/clean_installation_logs.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""clean all installation logs."""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+def clean_installation_logs():
+ installation_log_dirs = setting.INSTALLATION_LOGDIR
+ successful = True
+ for _, logdir in installation_log_dirs.items():
+ cmd = 'rm -rf %s/*' % logdir
+ status = os.system(cmd)
+ logging.info('run cmd %s returns %s', cmd, status)
+ if status:
+ successful = False
+ return successful
+
+
+if __name__ == "__main__":
+ flags.init()
+ logsetting.init()
+ clean_installation_logs()
diff --git a/compass-deck/bin/clean_installers.py b/compass-deck/bin/clean_installers.py
new file mode 100755
index 0000000..ae6dab2
--- /dev/null
+++ b/compass-deck/bin/clean_installers.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Scripts to delete cluster and it hosts"""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.actions import clean
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import database
+from compass.db.api import user as user_api
+from compass.tasks.client import celery
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+flags.add_bool('async',
+ help='run in async mode',
+ default=True)
+
+flags.add('os_installers',
+ help='comma separated os installers',
+ default='')
+flags.add('package_installers',
+ help='comma separated package installers',
+ default='')
+
+
+def clean_installers():
+ os_installers = [
+ os_installer
+ for os_installer in flags.OPTIONS.os_installers.split(',')
+ if os_installer
+ ]
+ package_installers = [
+ package_installer
+ for package_installer in flags.OPTIONS.package_installers.split(',')
+ if package_installer
+ ]
+ user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
+ adapters = adapter_api.list_adapters(user=user)
+ filtered_os_installers = {}
+ filtered_package_installers = {}
+ for adapter in adapters:
+ logging.info(
+ 'got adapter: %s', adapter
+ )
+ if 'os_installer' in adapter:
+ os_installer = adapter['os_installer']
+ os_installer_name = os_installer['alias']
+ if not os_installers or os_installer_name in os_installers:
+ filtered_os_installers[os_installer_name] = os_installer
+ else:
+ logging.info(
+ 'ignore os installer %s', os_installer_name
+ )
+ else:
+ logging.info(
+ 'cannot find os installer in adapter %s',
+ adapter['name']
+ )
+ if 'package_installer' in adapter:
+ package_installer = adapter['package_installer']
+ package_installer_name = package_installer['alias']
+ if (
+ not package_installers or
+ package_installer_name in package_installers
+ ):
+ filtered_package_installers[package_installer_name] = (
+ package_installer
+ )
+ else:
+ logging.info(
+ 'ignore package installer %s', package_installer_name
+ )
+ else:
+ logging.info(
+ 'cannot find package installer in adapter %s',
+ adapter['name']
+ )
+ logging.info(
+ 'clean os installers: %s', filtered_os_installers.keys()
+ )
+ logging.info(
+ 'clean package installers: %s', filtered_package_installers.keys()
+ )
+ if flags.OPTIONS.async:
+ for os_installer_name, os_installer in filtered_os_installers.items():
+ celery.send_task(
+ 'compass.tasks.clean_os_installer',
+ (
+ os_installer['name'],
+ os_installer['settings']
+ )
+ )
+ for package_installer_name, package_installer in (
+ filtered_package_installers.items()
+ ):
+ celery.send_task(
+ 'compass.tasks.clean_package_installer',
+ (
+ package_installer['name'],
+ package_installer['settings']
+ )
+ )
+ else:
+ for os_installer_name, os_installer in (
+ filtered_os_installers.items()
+ ):
+ try:
+ clean.clean_os_installer(
+ os_installer['name'],
+ os_installer['settings']
+ )
+ except Exception as error:
+ logging.error(
+ 'failed to clean os installer %s', os_installer_name
+ )
+ logging.exception(error)
+ for package_installer_name, package_installer in (
+ filtered_package_installers.items()
+ ):
+ try:
+ clean.clean_package_installer(
+ package_installer['name'],
+ package_installer['settings']
+ )
+ except Exception as error:
+ logging.error(
+ 'failed to clean package installer %s',
+ package_installer_name
+ )
+ logging.exception(error)
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ database.init()
+ clean_installers()
diff --git a/compass-deck/bin/client.py b/compass-deck/bin/client.py
new file mode 100755
index 0000000..d8eb59f
--- /dev/null
+++ b/compass-deck/bin/client.py
@@ -0,0 +1,1006 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""binary to deploy a cluster by compass client api."""
+from collections import defaultdict
+import itertools
+import json
+import netaddr
+import os
+import re
+import requests
+from restful import Client
+import socket
+import sys
+import time
+import yaml
+
+ROLE_UNASSIGNED = True
+ROLE_ASSIGNED = False
+
+import log as logging
+LOG = logging.getLogger(__name__)
+
+from oslo_config import cfg
+CONF = cfg.CONF
+
+
+def byteify(input):
+ if isinstance(input, dict):
+ return dict([(byteify(key), byteify(value))
+ for key, value in input.iteritems()])
+ elif isinstance(input, list):
+ return [byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
+opts = [
+ cfg.StrOpt(
+ 'compass_server',
+ help='compass server url',
+ default='http://127.0.0.1/api'
+ ),
+ cfg.StrOpt(
+ 'compass_user_email',
+ help='compass user email',
+ default='admin@huawei.com'
+ ),
+ cfg.StrOpt(
+ 'compass_user_password',
+ help='compass user password',
+ default='admin'
+ ),
+ cfg.StrOpt(
+ 'switch_ips',
+ help='comma separated switch ips',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'switch_credential',
+ help='comma separated <credential key>=<credential value>',
+ default='version=2c,community=public'
+ ),
+ cfg.IntOpt(
+ 'switch_max_retries',
+ help='max retries of poll switch',
+ default=10
+ ),
+ cfg.IntOpt(
+ 'switch_retry_interval',
+ help='interval to repoll switch',
+ default=10
+ ),
+ cfg.BoolOpt(
+ 'poll_switches',
+ help='if the client polls switches',
+ default=True
+ ),
+ cfg.StrOpt(
+ 'machines',
+ help='comma separated mac addresses of machines',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'subnets',
+ help='comma separated subnets',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'adapter_name',
+ help='adapter name',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'adapter_os_pattern',
+ help='adapter os name',
+ default=r'^(?i)centos.*'
+ ),
+ cfg.StrOpt(
+ 'adapter_target_system_pattern',
+ help='adapter target system name',
+ default='^openstack$'
+ ),
+ cfg.StrOpt(
+ 'adapter_flavor_pattern',
+ help='adapter flavor name',
+ default='allinone'
+ ),
+ cfg.StrOpt(
+ 'cluster_name',
+ help='cluster name',
+ default='cluster1'
+ ),
+ cfg.StrOpt(
+ 'language',
+ help='language',
+ default='EN'
+ ),
+ cfg.StrOpt(
+ 'timezone',
+ help='timezone',
+ default='GMT'
+ ),
+ cfg.StrOpt(
+ 'http_proxy',
+ help='http proxy',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'https_proxy',
+ help='https proxy',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'no_proxy',
+ help='no proxy',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'ntp_server',
+ help='ntp server',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'dns_servers',
+ help='dns servers',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'domain',
+ help='domain',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'search_path',
+ help='search path',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'local_repo_url',
+ help='local repo url',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'default_gateway',
+ help='default gateway',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'server_credential',
+ help=(
+ 'server credential formatted as '
+ '<username>=<password>'
+ ),
+ default='root=root'
+ ),
+ cfg.StrOpt(
+ 'os_config_json_file',
+ help='json formatted os config file',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'service_credentials',
+ help=(
+ 'comma separated service credentials formatted as '
+ '<servicename>:<username>=<password>,...'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'console_credentials',
+ help=(
+ 'comma separated console credentials formatted as '
+ '<consolename>:<username>=<password>'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'hostnames',
+ help='comma separated hostnames',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'host_networks',
+ help=(
+ 'semicolon separated host names and their networks '
+ '<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscuous>,...'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'partitions',
+ help=(
+ 'comma separated partitions '
+ '<partition name>=<partition_value>'
+ ),
+ default='tmp:percentage=10%,var:percentage=30%,home:percentage=30%'
+ ),
+ cfg.StrOpt(
+ 'network_mapping',
+ help=(
+ 'comma separated network mapping '
+ '<network_type>=<interface_name>'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'package_config_json_file',
+ help='json formatted package config file',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'host_roles',
+ help=(
+ 'semicolon separated host roles '
+ '<hostname>=<comma separated roles>'
+ ),
+ default=''
+ ),
+ cfg.StrOpt(
+ 'default_roles',
+ help=(
+ 'comma separated default roles '
+ '<rolename>'
+ ),
+ default=''
+ ),
+ cfg.IntOpt(
+ 'action_timeout',
+ help='action timeout in seconds',
+ default=60
+ ),
+ cfg.IntOpt(
+ 'deployment_timeout',
+ help='deployment timeout in minutes',
+ default=60
+ ),
+ cfg.IntOpt(
+ 'progress_update_check_interval',
+ help='progress update status check interval in seconds',
+ default=60
+ ),
+ cfg.StrOpt(
+ 'dashboard_url',
+ help='dashboard url',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'dashboard_link_pattern',
+ help='dashboard link pattern',
+ default=r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)'
+ ),
+ cfg.StrOpt(
+ 'cluster_vip',
+ help='cluster ip address',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'enable_secgroup',
+ help='enable security group',
+ default='true'
+ ),
+ cfg.StrOpt(
+ 'network_cfg',
+ help='network config file',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'neutron_cfg',
+ help='neutron config file',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'cluster_pub_vip',
+ help='cluster ip address',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'cluster_prv_vip',
+ help='cluster ip address',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'repo_name',
+ help='repo name',
+ default=''
+ ),
+ cfg.StrOpt(
+ 'deploy_type',
+ help='deploy type',
+ default='virtual'
+ ),
+]
+CONF.register_cli_opts(opts)
+
+
+def is_role_unassigned(role):
+ return role
+
+
+def _load_config(config_filename):
+ if not config_filename:
+ return {}
+ with open(config_filename) as config_file:
+ content = config_file.read()
+ return json.loads(content)
+
+
+class CompassClient(object):
+ def __init__(self):
+ LOG.info("xh: compass_server=%s" % CONF.compass_server)
+ self.client = Client(CONF.compass_server)
+ self.subnet_mapping = {}
+ self.role_mapping = {}
+ self.host_mapping = {}
+ self.host_ips = defaultdict(list)
+ self.host_roles = {}
+
+ self.login()
+
+ def is_ok(self, status):
+ return 200 <= status < 300
+
+ def login(self):
+ status, resp = self.client.get_token(
+ CONF.compass_user_email,
+ CONF.compass_user_password
+ )
+
+ LOG.info(
+ 'login status: %s, resp: %s',
+ status, resp
+ )
+ if self.is_ok(status):
+ return resp["token"]
+ else:
+ raise Exception(
+ 'failed to login %s with user %s' % (
+ CONF.compass_server,
+ CONF.compass_user_email
+ )
+ )
+
+ def get_machines(self):
+ status, resp = self.client.list_machines()
+ LOG.info(
+ 'get all machines status: %s, resp: %s', status, resp)
+ if not self.is_ok(status):
+ raise RuntimeError('failed to get machines')
+
+ machines_to_add = list(set([
+ machine for machine in CONF.machines.split(',')
+ if machine
+ ]))
+
+ LOG.info('machines to add: %s', machines_to_add)
+ machines_db = [str(m["mac"]) for m in resp]
+ LOG.info('machines in db: %s', machines_db)
+ assert(set(machines_db) == set(machines_to_add))
+
+ return [m["id"] for m in resp]
+
+ def get_adapter(self):
+ """get adapter."""
+ status, resp = self.client.list_adapters(name=CONF.adapter_name)
+ LOG.info(
+ 'get all adapters status: %s, resp: %s',
+ status, resp
+ )
+
+ if not self.is_ok(status) or not resp:
+ raise RuntimeError('failed to get adapters')
+
+ os_re = re.compile(CONF.adapter_os_pattern)
+ flavor_re = re.compile(CONF.adapter_flavor_pattern)
+
+ adapter_id = None
+ os_id = None
+ flavor_id = None
+ adapter = None
+
+ adapter = resp[0]
+ adapter_id = adapter['id']
+ for supported_os in adapter['supported_oses']:
+ if not os_re or os_re.match(supported_os['name']):
+ os_id = supported_os['os_id']
+ break
+
+ if 'flavors' in adapter:
+ for flavor in adapter['flavors']:
+ if not flavor_re or flavor_re.match(flavor['name']):
+ flavor_id = flavor['id']
+ break
+
+ assert(os_id and flavor_id)
+ return (adapter_id, os_id, flavor_id)
+
+ def add_subnets(self):
+ subnets = [
+ subnet for subnet in CONF.subnets.split(',')
+ if subnet
+ ]
+
+ assert(subnets)
+
+ subnet_mapping = {}
+ for subnet in subnets:
+ try:
+ netaddr.IPNetwork(subnet)
+ except Exception:
+ raise RuntimeError('subnet %s format is invalid' % subnet)
+
+ status, resp = self.client.add_subnet(subnet)
+ LOG.info(
+ 'add subnet %s status %s response %s',
+ subnet,
+ status,
+ resp
+ )
+ if not self.is_ok(status):
+ raise RuntimeError('failed to add subnet %s' % subnet)
+
+ subnet_mapping[resp['subnet']] = resp['id']
+
+ self.subnet_mapping = subnet_mapping
+
+ def add_cluster(self, adapter_id, os_id, flavor_id):
+ """add a cluster."""
+ cluster_name = CONF.cluster_name
+ assert(cluster_name)
+ status, resp = self.client.add_cluster(
+ cluster_name, adapter_id,
+ os_id, flavor_id)
+
+ if not self.is_ok(status):
+ raise RuntimeError("add cluster failed")
+
+ LOG.info(
+ 'add cluster %s status: %s resp:%s',
+ cluster_name,
+ status,
+ resp
+ )
+
+ if isinstance(resp, list):
+ cluster = resp[0]
+ else:
+ cluster = resp
+
+ cluster_id = cluster['id']
+ flavor = cluster.get('flavor', {})
+ roles = flavor.get('roles', [])
+
+ for role in roles:
+ if role.get('optional', False):
+ self.role_mapping[role['name']] = ROLE_ASSIGNED
+ else:
+ self.role_mapping[role['name']] = ROLE_UNASSIGNED
+
+ return cluster_id
+
+ def add_cluster_hosts(self, cluster_id, machines):
+ hostnames = [
+ hostname for hostname in CONF.hostnames.split(',')
+ if hostname
+ ]
+
+ assert(len(machines) == len(hostnames))
+
+ machines_dict = []
+ for machine_id, hostname in zip(machines, hostnames):
+ machines_dict.append({
+ 'machine_id': machine_id,
+ 'name': hostname
+ })
+
+ # add hosts to the cluster.
+ status, resp = self.client.add_hosts_to_cluster(
+ cluster_id,
+ {'machines': machines_dict})
+
+ LOG.info(
+ 'add machines %s to cluster %s status: %s, resp: %s',
+ machines_dict,
+ cluster_id,
+ status,
+ resp
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("add host to cluster failed")
+
+ for host in resp['hosts']:
+ self.host_mapping[host['hostname']] = host['id']
+
+ assert(len(self.host_mapping) == len(machines))
+
+ def set_cluster_os_config(self, cluster_id):
+ """set cluster os config."""
+ os_config = {}
+ language = CONF.language
+ timezone = CONF.timezone
+ http_proxy = CONF.http_proxy
+ https_proxy = CONF.https_proxy
+ local_repo_url = CONF.local_repo_url
+ repo_name = CONF.repo_name
+ deploy_type = CONF.deploy_type
+ if not https_proxy and http_proxy:
+ https_proxy = http_proxy
+
+ no_proxy = [
+ no_proxy for no_proxy in CONF.no_proxy.split(',')
+ if no_proxy
+ ]
+
+ compass_server = CONF.compass_server
+ if http_proxy:
+ for hostname, ips in self.host_ips.items():
+ no_proxy.append(hostname)
+ no_proxy.extend(ips)
+
+ ntp_server = CONF.ntp_server or compass_server
+
+ dns_servers = [
+ dns_server for dns_server in CONF.dns_servers.split(',')
+ if dns_server
+ ]
+ if not dns_servers:
+ dns_servers = [compass_server]
+
+ domain = CONF.domain
+ if not domain:
+ raise Exception('domain is not defined')
+
+ search_path = [
+ search_path for search_path in CONF.search_path.split(',')
+ if search_path
+ ]
+
+ if not search_path:
+ search_path = [domain]
+
+ default_gateway = CONF.default_gateway
+ if not default_gateway:
+ raise Exception('default gateway is not defined')
+
+ general_config = {
+ 'language': language,
+ 'timezone': timezone,
+ 'ntp_server': ntp_server,
+ 'dns_servers': dns_servers,
+ 'default_gateway': default_gateway
+ }
+
+ if http_proxy:
+ general_config['http_proxy'] = http_proxy
+ if https_proxy:
+ general_config['https_proxy'] = https_proxy
+ if no_proxy:
+ general_config['no_proxy'] = no_proxy
+ if domain:
+ general_config['domain'] = domain
+ if search_path:
+ general_config['search_path'] = search_path
+ if local_repo_url:
+ general_config['local_repo'] = local_repo_url
+ if repo_name:
+ general_config['repo_name'] = repo_name
+ if deploy_type:
+ general_config['deploy_type'] = deploy_type
+
+ os_config["general"] = general_config
+
+ server_credential = CONF.server_credential
+ if '=' in server_credential:
+ server_username, server_password = server_credential.split('=', 1)
+ elif server_credential:
+ server_username = server_password = server_credential
+ else:
+ server_username = 'root'
+ server_password = 'root'
+
+ os_config['server_credentials'] = {
+ 'username': server_username,
+ 'password': server_password
+ }
+
+ partitions = [
+ partition for partition in CONF.partitions.split(',')
+ if partition
+ ]
+
+ partition_config = {}
+ for partition in partitions:
+ assert("=" in partition)
+
+ partition_name, partition_value = partition.split('=', 1)
+ partition_name = partition_name.strip()
+ partition_value = partition_value.strip()
+
+ assert(partition_name and partition_value)
+
+ if partition_value.endswith('%'):
+ partition_type = 'percentage'
+ partition_value = int(partition_value[:-1])
+ else:
+ partition_type = 'size'
+
+ partition_config[partition_name] = {
+ partition_type: partition_value
+ }
+
+ os_config['partition'] = partition_config
+
+ """
+ os_config_filename = CONF.os_config_json_file
+ if os_config_filename:
+ util.merge_dict(
+ os_config, _load_config(os_config_filename)
+ )
+ """
+
+ status, resp = self.client.update_cluster_config(
+ cluster_id, os_config=os_config)
+ LOG.info(
+ 'set os config %s to cluster %s status: %s, resp: %s',
+ os_config, cluster_id, status, resp)
+ if not self.is_ok(status):
+ raise RuntimeError('failed to set os config %s to cluster %s'
+ % (os_config, cluster_id))
+
+ def set_host_networking(self):
+ """set cluster hosts networking."""
+ def get_subnet(ip_str):
+ try:
+ LOG.info("subnets: %s" % self.subnet_mapping.keys())
+ ip = netaddr.IPAddress(ip_str)
+ for cidr, subnet_id in self.subnet_mapping.items():
+ subnet = netaddr.IPNetwork(cidr)
+ if ip in subnet:
+ return True, subnet_id
+
+ LOG.info("ip %s not in %s" % (ip_str, cidr))
+ return False, None
+ except Exception:
+ LOG.exception("ip addr %s is invalid" % ip_str)
+ return False, None
+
+ for host_network in CONF.host_networks.split(';'):
+ hostname, networks_str = host_network.split(':', 1)
+ hostname = hostname.strip()
+ networks_str = networks_str.strip()
+
+ assert(hostname in self.host_mapping)
+
+ host_id = self.host_mapping[hostname]
+ intf_list = networks_str.split(',')
+ for intf_str in intf_list:
+ interface, intf_properties = intf_str.split('=', 1)
+ intf_properties = intf_properties.strip().split('|')
+
+ assert(intf_properties)
+ ip_str = intf_properties[0]
+
+ status, subnet_id = get_subnet(ip_str)
+ if not status:
+ raise RuntimeError("ip addr %s is invalid" % ip_str)
+
+ properties = dict([
+ (intf_property, True)
+ for intf_property in intf_properties[1:]
+ ])
+
+ LOG.info(
+ 'add host %s interface %s ip %s network properties %s',
+ hostname, interface, ip_str, properties)
+
+ status, response = self.client.add_host_network(
+ host_id, interface, ip=ip_str, subnet_id=subnet_id,
+ **properties
+ )
+
+ LOG.info(
+ 'add host %s interface %s ip %s network properties %s '
+ 'status %s: %s',
+ hostname, interface, ip_str, properties,
+ status, response
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("add host network failed")
+
+ self.host_ips[hostname].append(ip_str)
+
+ def set_cluster_package_config(self, cluster_id):
+ """set cluster package config."""
+ package_config = {"security": {}}
+
+ service_credentials = [
+ service_credential
+ for service_credential in CONF.service_credentials.split(',')
+ if service_credential
+ ]
+
+ service_credential_cfg = {}
+ LOG.info(
+ 'service credentials: %s', service_credentials
+ )
+
+ for service_credential in service_credentials:
+ if ':' not in service_credential:
+ raise Exception(
+ 'no : in service credential %s' % service_credential
+ )
+ service_name, service_pair = service_credential.split(':', 1)
+ if '=' not in service_pair:
+ raise Exception(
+ 'there is no = in service %s security' % service_name
+ )
+
+ username, password = service_pair.split('=', 1)
+ service_credential_cfg[service_name] = {
+ 'username': username,
+ 'password': password
+ }
+
+ console_credentials = [
+ console_credential
+ for console_credential in CONF.console_credentials.split(',')
+ if console_credential
+ ]
+
+ LOG.info(
+ 'console credentials: %s', console_credentials
+ )
+
+ console_credential_cfg = {}
+ for console_credential in console_credentials:
+ if ':' not in console_credential:
+ raise Exception(
+ 'there is no : in console credential %s'
+ % console_credential
+ )
+ console_name, console_pair = console_credential.split(':', 1)
+ if '=' not in console_pair:
+ raise Exception(
+ 'there is no = in console %s security' % console_name
+ )
+ username, password = console_pair.split('=', 1)
+ console_credential_cfg[console_name] = {
+ 'username': username,
+ 'password': password
+ }
+
+ package_config["security"] = {
+ "service_credentials": service_credential_cfg,
+ "console_credentials": console_credential_cfg
+ }
+
+ network_mapping = dict([
+ network_pair.split('=', 1)
+ for network_pair in CONF.network_mapping.split(',')
+ if '=' in network_pair
+ ])
+
+ package_config['network_mapping'] = network_mapping
+
+ assert(os.path.exists(CONF.network_cfg))
+ network_cfg = yaml.load(open(CONF.network_cfg))
+ package_config["network_cfg"] = network_cfg
+
+ assert(os.path.exists(CONF.neutron_cfg))
+ neutron_cfg = yaml.load(open(CONF.neutron_cfg))
+ package_config["neutron_config"] = neutron_cfg
+
+ """
+ package_config_filename = CONF.package_config_json_file
+ if package_config_filename:
+ util.merge_dict(
+ package_config, _load_config(package_config_filename)
+ )
+ """
+ package_config['ha_proxy'] = {}
+ if CONF.cluster_vip:
+ package_config["ha_proxy"]["vip"] = CONF.cluster_vip
+
+ package_config['enable_secgroup'] = (CONF.enable_secgroup == "true")
+
+ status, resp = self.client.update_cluster_config(
+ cluster_id, package_config=package_config)
+ LOG.info(
+ 'set package config %s to cluster %s status: %s, resp: %s',
+ package_config, cluster_id, status, resp)
+
+ if not self.is_ok(status):
+ raise RuntimeError("set cluster package_config failed")
+
+ def set_host_roles(self, cluster_id, host_id, roles):
+ status, response = self.client.update_cluster_host(
+ cluster_id, host_id, roles=roles)
+
+ LOG.info(
+ 'set cluster %s host %s roles %s status %s: %s',
+ cluster_id, host_id, roles, status, response
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("set host roles failed")
+
+ for role in roles:
+ if role in self.role_mapping:
+ self.role_mapping[role] = ROLE_ASSIGNED
+
+ def set_all_hosts_roles(self, cluster_id):
+ for host_str in CONF.host_roles.split(';'):
+ host_str = host_str.strip()
+ hostname, roles_str = host_str.split('=', 1)
+
+ assert(hostname in self.host_mapping)
+ host_id = self.host_mapping[hostname]
+
+ roles = [role.strip() for role in roles_str.split(',') if role]
+
+ self.set_host_roles(cluster_id, host_id, roles)
+ self.host_roles[hostname] = roles
+
+ unassigned_hostnames = list(set(self.host_mapping.keys())
+ - set(self.host_roles.keys()))
+
+ unassigned_roles = [role for role, status in self.role_mapping.items()
+ if is_role_unassigned(status)]
+
+ assert(len(unassigned_hostnames) >= len(unassigned_roles))
+
+ for hostname, role in map(
+ None,
+ unassigned_hostnames,
+ unassigned_roles
+ ):
+ if role is None:
+ # more unassigned hosts than roles; defaults are applied below
+ continue
+ host_id = self.host_mapping[hostname]
+ self.set_host_roles(cluster_id, host_id, [role])
+ self.host_roles[hostname] = [role]
+
+ unassigned_hostnames = list(set(self.host_mapping.keys())
+ - set(self.host_roles.keys()))
+
+ if not unassigned_hostnames:
+ return
+
+ # assign default roles to unassigned hosts
+ default_roles = [
+ role for role in CONF.default_roles.split(',')
+ if role
+ ]
+
+ assert(default_roles)
+
+ cycle_roles = itertools.cycle(default_roles)
+ for hostname in unassigned_hostnames:
+ host_id = self.host_mapping[hostname]
+ roles = [cycle_roles.next()]
+ self.set_host_roles(cluster_id, host_id, roles)
+ self.host_roles[hostname] = roles
+
+ def deploy_clusters(self, cluster_id):
+ host_ids = self.host_mapping.values()
+
+ status, response = self.client.review_cluster(
+ cluster_id, review={'hosts': host_ids}
+ )
+ LOG.info(
+ 'review cluster %s hosts %s, status %s: %s',
+ cluster_id, host_ids, status, response
+ )
+
+ # review the cluster config for these hosts before triggering deployment
+ if not self.is_ok(status):
+ raise RuntimeError("review cluster host failed")
+
+ status, response = self.client.deploy_cluster(
+ cluster_id, deploy={'hosts': host_ids}
+ )
+ LOG.info(
+ 'deploy cluster %s hosts %s status %s: %s',
+ cluster_id, host_ids, status, response
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("deploy cluster failed")
+
+ def get_installing_progress(self, cluster_id):
+ """get intalling progress."""
+ action_timeout = time.time() + 60 * float(CONF.action_timeout)
+ deployment_timeout = time.time() + 60 * float(
+ CONF.deployment_timeout)
+
+ current_time = time.time()
+ deployment_failed = True
+ while current_time < deployment_timeout:
+ status, cluster_state = self.client.get_cluster_state(cluster_id)
+ LOG.info(
+ 'get cluster %s state status %s: %s',
+ cluster_id, status, cluster_state
+ )
+ if not self.is_ok(status):
+ raise RuntimeError("can not get cluster state")
+
+ if cluster_state['state'] in ['UNINITIALIZED', 'INITIALIZED']:
+ if current_time >= action_timeout:
+ deployment_failed = True
+ break
+ else:
+ continue
+
+ elif cluster_state['state'] == 'SUCCESSFUL':
+ deployment_failed = False
+ break
+ elif cluster_state['state'] == 'ERROR':
+ deployment_failed = True
+ break
+
+ if deployment_failed:
+ raise RuntimeError("deploy cluster failed")
+
+ def check_dashboard_links(self, cluster_id):
+ dashboard_url = CONF.dashboard_url
+ if not dashboard_url:
+ LOG.info('no dashboard url set')
+ return
+ dashboard_link_pattern = re.compile(
+ CONF.dashboard_link_pattern)
+ r = requests.get(dashboard_url, verify=False)
+ r.raise_for_status()
+ match = dashboard_link_pattern.search(r.text)
+ if match:
+ LOG.info(
+ 'dashboard login page for cluster %s can be downloaded',
+ cluster_id)
+ else:
+ msg = (
+ '%s failed to be downloaded\n'
+ 'the context is:\n%s\n'
+ ) % (dashboard_url, r.text)
+ raise Exception(msg)
+
+
+def main():
+ client = CompassClient()
+ machines = client.get_machines()
+
+ LOG.info('machines are %s', machines)
+
+ client.add_subnets()
+ adapter_id, os_id, flavor_id = client.get_adapter()
+ cluster_id = client.add_cluster(adapter_id, os_id, flavor_id)
+
+ client.add_cluster_hosts(cluster_id, machines)
+ client.set_host_networking()
+ client.set_cluster_os_config(cluster_id)
+
+ if flavor_id:
+ client.set_cluster_package_config(cluster_id)
+
+ client.set_all_hosts_roles(cluster_id)
+ client.deploy_clusters(cluster_id)
+
+ client.get_installing_progress(cluster_id)
+ client.check_dashboard_links(cluster_id)
+
+if __name__ == "__main__":
+ CONF(args=sys.argv[1:])
+ main()
diff --git a/compass-deck/bin/client.sh b/compass-deck/bin/client.sh
new file mode 100755
index 0000000..48c70e2
--- /dev/null
+++ b/compass-deck/bin/client.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+/opt/compass/bin/client.py --switch_ips=172.29.8.40 --machines=00:0c:29:a7:ea:4b --adapter_name=os_only --adapter_flavor_pattern= --subnets=10.145.88.0/23,172.16.0.0/16 --cluster_name=cluster1 --domain=ods.com --default_gateway=10.145.88.1 --service_credentials= --console_credentials= --hostnames=host1 --host_networks="host1:eth0=10.145.89.201|is_mgmt,eth1=172.16.100.201|is_promiscuous" --partitions="/var=50%,/home=30%" --network_mapping= --host_roles= --dashboard_url=
diff --git a/compass-deck/bin/cobbler/remove_systems.sh b/compass-deck/bin/cobbler/remove_systems.sh
new file mode 100755
index 0000000..1973d43
--- /dev/null
+++ b/compass-deck/bin/cobbler/remove_systems.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+systems=$(cobbler system list)
+echo "remove systems: $systems"
+for system in $systems; do
+ cobbler system remove --name=$system
+ if [[ "$?" != "0" ]]; then
+ echo "failed to remove system %s"
+ fi
+done
diff --git a/compass-deck/bin/compass_check.py b/compass-deck/bin/compass_check.py
new file mode 100755
index 0000000..5fc7e69
--- /dev/null
+++ b/compass-deck/bin/compass_check.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""compass health check."""
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+import compass.actions.cli as cli
+
+sys.exit(cli.main())
diff --git a/compass-deck/bin/compass_wsgi.py b/compass-deck/bin/compass_wsgi.py
new file mode 100755
index 0000000..9e889e7
--- /dev/null
+++ b/compass-deck/bin/compass_wsgi.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""compass wsgi module."""
+import os
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+flags.init()
+flags.OPTIONS.logfile = setting.WEB_LOGFILE
+logsetting.init()
+
+
+from compass.api import api as compass_api
+
+
+compass_api.init()
+application = compass_api.app
diff --git a/compass-deck/bin/compassd b/compass-deck/bin/compassd
new file mode 100755
index 0000000..fc77bb9
--- /dev/null
+++ b/compass-deck/bin/compassd
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+RETVAL_CELERY=0
+RETVAL_PROGRESS_UPDATE=0
+start() {
+ service compass-celeryd start
+ RETVAL_CELERY=$?
+ service compass-progress-updated start
+ RETVAL_PROGRESS_UPDATE=$?
+}
+
+stop() {
+ service compass-celeryd stop
+ RETVAL_CELERY=$?
+ service compass-progress-updated stop
+ RETVAL_PROGRESS_UPDATE=$?
+}
+
+restart() {
+ stop
+ start
+}
+case "$1" in
+ start|stop|restart)
+ $1
+ ;;
+ status)
+ service compass-celeryd status
+ RETVAL_CELERY=$?
+ service compass-progress-updated status
+ RETVAL_PROGRESS_UPDATE=$?
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status|restart}"
+ exit 1
+ ;;
+esac
+if [[ "$RETVAL_CELERY" != "0" ]]; then
+ exit $RETVAL_CELERY
+fi
+if [[ "$RETVAL_PROGRESS_UPDATE" != "0" ]]; then
+ exit $RETVAL_PROGRESS_UPDATE
+fi
diff --git a/compass-deck/bin/csvdeploy.py b/compass-deck/bin/csvdeploy.py
new file mode 100755
index 0000000..23b0c46
--- /dev/null
+++ b/compass-deck/bin/csvdeploy.py
@@ -0,0 +1,333 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""deploy cluster from csv file."""
+import ast
+import copy
+import csv
+import os
+import re
+import sys
+
+from multiprocessing import Process
+from multiprocessing import Queue
+from optparse import OptionParser
+
+try:
+ from compass.apiclient.restful import Client
+except ImportError:
+ curr_dir = os.path.dirname(os.path.realpath(__file__))
+ apiclient_dir = os.path.dirname(curr_dir) + '/compass/apiclient'
+ sys.path.append(apiclient_dir)
+ from restful import Client
+
+
+DELIMITER = ","
+
+# Sqlite tables
+TABLES = {
+ 'switch_config': {'columns': ['id', 'ip', 'filter_port']},
+ 'switch': {'columns': ['id', 'ip', 'credential_data']},
+ 'machine': {'columns': ['id', 'mac', 'port', 'vlan', 'switch_id']},
+ 'cluster': {'columns': ['id', 'name', 'security_config',
+ 'networking_config', 'partition_config',
+ 'adapter_id', 'state']},
+ 'cluster_host': {'columns': ['id', 'cluster_id', 'hostname', 'machine_id',
+ 'config_data', 'state']},
+ 'adapter': {'columns': ['id', 'name', 'os', 'target_system']},
+ 'role': {'columns': ['id', 'name', 'target_system', 'description']}
+}
+
+
+def start(csv_dir, compass_url):
+ """Start deploy both failed clusters and new clusters."""
+ # Get clusters and hosts data from CSV
+ clusters_data = get_csv('cluster.csv', csv_dir)
+ hosts_data = get_csv('cluster_host.csv', csv_dir)
+ data = {}
+ for cluster in clusters_data:
+ tmp = {}
+ tmp['cluster_data'] = cluster
+ tmp['hosts_data'] = []
+ data[cluster['id']] = tmp
+
+ for host in hosts_data:
+ cluster_id = host['cluster_id']
+ if cluster_id not in data:
+ print ("Unknown cluster_id=%s of the host! host_id=%s!"
+ % (cluster_id, host['id']))
+ sys.exit(1)
+
+ data[cluster_id]['hosts_data'].append(host)
+
+ apiClient = _APIClient(compass_url)
+ results_q = Queue()
+ ps = []
+ for elem in data:
+ cluster_data = data[elem]['cluster_data']
+ hosts_data = data[elem]['hosts_data']
+ p = Process(target=apiClient.execute,
+ args=(cluster_data, hosts_data, results_q))
+ ps.append(p)
+ p.start()
+
+ for p in ps:
+ p.join()
+
+ progress_file = '/'.join((csv_dir, 'progress.csv'))
+ write_progress_to_file(results_q, progress_file)
+
+
+def write_progress_to_file(results_q, progress_file):
+ cluster_headers = ['cluster_id', 'progress_url']
+ host_headers = ['host_id', 'progress_url']
+
+ with open(progress_file, 'wb') as f:
+ print "Writing all progress information to %s......" % progress_file
+ writer = csv.writer(f, delimiter=DELIMITER, quoting=csv.QUOTE_MINIMAL)
+ while not results_q.empty():
+ record = results_q.get()
+ hosts = []
+ cluster = [record['deployment']['cluster']['cluster_id'],
+ record['deployment']['cluster']['url']]
+ writer.writerow(cluster_headers)
+ writer.writerow(cluster)
+
+ for elem in record['deployment']['hosts']:
+ host = [elem['host_id'], elem['url']]
+ hosts.append(host)
+
+ writer.writerow(host_headers)
+ writer.writerows(hosts)
+ print "Done!\n"
+
+
+def get_csv(fname, csv_dir):
+ """Parse csv files into python variables.
+
+ .. note::
+ all nested fields in db will be assembled.
+
+ :param fname: CSV file name
+ :param csv_dir: CSV files directory
+
+ :returns: list of dict which key is column name and value is its data.
+ """
+ headers = []
+ rows = []
+ file_dir = '/'.join((csv_dir, fname))
+ with open(file_dir) as f:
+ reader = csv.reader(f, delimiter=DELIMITER, quoting=csv.QUOTE_MINIMAL)
+ headers = reader.next()
+ rows = [x for x in reader]
+
+ result = []
+ for row in rows:
+ data = {}
+ for col_name, value in zip(headers, row):
+ if re.match(r'^[\d]+$', value):
+ # the value should be an integer
+ value = int(value)
+ elif re.match(r'^\[(\'\w*\'){1}(\s*,\s*\'\w*\')*\]$', value):
+ # the value should be a list
+ value = ast.literal_eval(value)
+ elif value == 'None':
+ value = ''
+
+ if col_name.find('.') > 0:
+ tmp_result = {}
+ tmp_result[col_name.split('.')[-1]] = value
+ keys = col_name.split('.')[::-1][1:]
+ for key in keys:
+ tmp = {}
+ tmp[key] = tmp_result
+ tmp_result = tmp
+ merge_dict(data, tmp_result)
+ else:
+ data[col_name] = value
+
+ result.append(data)
+
+ return result
+
+
+def merge_dict(lhs, rhs, override=True):
+ """Merge nested right dict into left nested dict recursively.
+
+ :param lhs: dict to be merged into.
+ :type lhs: dict
+ :param rhs: dict to merge from.
+ :type rhs: dict
+ :param override: if True, the value in rhs overrides the value in lhs.
+ :type override: bool
+
+ :raises: TypeError if lhs or rhs is not a dict.
+ """
+ if not rhs:
+ return
+
+ if not isinstance(lhs, dict):
+ raise TypeError('lhs type is %s while expected is dict' % type(lhs),
+ lhs)
+
+ if not isinstance(rhs, dict):
+ raise TypeError('rhs type is %s while expected is dict' % type(rhs),
+ rhs)
+
+ for key, value in rhs.items():
+ if isinstance(value, dict) and key in lhs and isinstance(lhs[key],
+ dict):
+ merge_dict(lhs[key], value, override)
+ else:
+ if override or key not in lhs:
+ lhs[key] = copy.deepcopy(value)
+
+
+class _APIClient(Client):
+ def __init__(self, url, headers=None, proxies=None, stream=None):
+ super(_APIClient, self).__init__(url, headers, proxies, stream)
+
+ def set_cluster_resource(self, cluster_id, resource, data):
+ url = "/clusters/%d/%s" % (cluster_id, resource)
+ return self._put(url, data=data)
+
+ def execute(self, cluster_data, hosts_data, resp_results):
+ """The process includes creating or updating a cluster.
+
+ The cluster configuration, add or update a host in the cluster,
+ and deploy the updated hosts.
+
+ :param cluster_data: the dictionary of cluster data
+ """
+ cluster_id = cluster_data['id']
+ code, resp = self.get_cluster(cluster_id)
+ if code == 404:
+ # Create a new cluster
+ name = cluster_data['name']
+ adapter_id = cluster_data['adapter_id']
+ code, resp = self.add_cluster(name, adapter_id)
+
+ if code != 200:
+ print ("Failed to create the cluster which name is "
+ "%s!\nError message: %s" % (name, resp['message']))
+ sys.exit(1)
+
+ # Update the config(security, networking, partition) of the cluster
+ security_req = {}
+ networking_req = {}
+ partition_req = {}
+
+ security_req['security'] = cluster_data['security_config']
+ networking_req['networking'] = cluster_data['networking_config']
+ partition_req['partition'] = cluster_data['partition_config']
+
+ print "Update Security config......."
+ code, resp = self.set_cluster_resource(cluster_id, 'security',
+ security_req)
+ if code != 200:
+ print ("Failed to update Security config for cluster id=%s!\n"
+ "Error message: " % (cluster_id, resp['message']))
+ sys.exit(1)
+
+ print "Update Networking config......."
+ code, resp = self.set_cluster_resource(cluster_id, 'networking',
+ networking_req)
+ if code != 200:
+ print ("Failed to update Networking config for cluster id=%s!\n"
+ "Error message: %s" % (cluster_id, resp['message']))
+ sys.exit(1)
+
+ print "Update Partition config......."
+ code, resp = self.set_cluster_resource(cluster_id, 'partition',
+ partition_req)
+ if code != 200:
+ print ("Failed to update Partition config for cluster id=%s!\n"
+ "Error message: " % (cluster_id, resp['message']))
+ sys.exit(1)
+
+ deploy_list = []
+ deploy_hosts_data = []
+
+ machines_list = []
+ new_hosts_data = []
+ for record in hosts_data:
+ if record['state'] and int(record['deploy_action']):
+ deploy_list.append(record['id'])
+ deploy_hosts_data.append(record)
+
+ elif int(record['deploy_action']):
+ machines_list.append(record['machine_id'])
+ new_hosts_data.append(record)
+
+ if machines_list:
+ # add new hosts to the cluster
+ code, resp = self.add_hosts(cluster_id, machines_list)
+ if code != 200:
+ print ("Failed to add hosts to the cluster id=%s!\n"
+ "Error message: %s.\nfailed hosts are %s"
+ % (cluster_id, resp['message'], resp['failedMachines']))
+ sys.exit(1)
+
+ for record, host in zip(new_hosts_data, resp['cluster_hosts']):
+ record['id'] = host['id']
+ deploy_list.append(host['id'])
+ deploy_hosts_data.append(record)
+
+ # Update the config of each host in the cluster
+ for host in deploy_hosts_data:
+ req = {}
+ host_id = host['id']
+ print "Updating the config of host id=%s" % host['id']
+ req['hostname'] = host['hostname']
+ req.update(host['config_data'])
+ code, resp = self.update_host_config(int(host_id), raw_data=req)
+ if code != 200:
+ print ("Failed to update the config of the host id=%s!\n"
+ "Error message: %s" % (host_id, resp['message']))
+ sys.exit(1)
+
+ # Start to deploy the cluster
+ print "Start to deploy the cluster!....."
+ deploy_req = {"deploy": deploy_list}
+ code, resp = self.deploy_hosts(cluster_id, raw_data=deploy_req)
+ print "---Cluster Info---"
+ print "cluster_id url"
+ print (" %s %s"
+ % (resp['deployment']['cluster']['cluster_id'],
+ resp['deployment']['cluster']['url']))
+ print "---Hosts Info-----"
+ print "host_id url"
+ for host in resp['deployment']['hosts']:
+ print " %s %s" % (host['host_id'], host['url'])
+ print "---------------------------------------------------------------"
+ print "\n"
+ resp_results.put(resp)
+
+
+if __name__ == "__main__":
+ usage = "usage: %prog [options]"
+ parser = OptionParser(usage)
+
+ parser.add_option("-d", "--csv-dir", dest="csv_dir",
+ help="The directory of CSV files used for depolyment")
+ parser.add_option("-u", "--compass-url", dest="compass_url",
+ help="The URL of Compass server")
+ (options, args) = parser.parse_args()
+
+ if not os.path.exists(options.csv_dir):
+ print "Cannot find the directory: %s" % options.csv_dir
+ sys.exit(1)
+
+ start(options.csv_dir, options.compass_url)
diff --git a/compass-deck/bin/delete_clusters.py b/compass-deck/bin/delete_clusters.py
new file mode 100755
index 0000000..fddec17
--- /dev/null
+++ b/compass-deck/bin/delete_clusters.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Scripts to delete cluster and it hosts"""
+import logging
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+
+from compass.db.api import cluster as cluster_api
+from compass.db.api import database
+from compass.db.api import host as host_api
+from compass.db.api import user as user_api
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+flags.add('clusternames',
+          help='comma separated cluster names',
+ default='')
+flags.add_bool('delete_hosts',
+               help='whether to delete all hosts related to the cluster',
+ default=False)
+
+
+def delete_clusters():
+ clusternames = [
+ clustername
+ for clustername in flags.OPTIONS.clusternames.split(',')
+ if clustername
+ ]
+ user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
+ list_cluster_args = {}
+ if clusternames:
+ list_cluster_args['name'] = clusternames
+ clusters = cluster_api.list_clusters(
+ user=user, **list_cluster_args
+ )
+ delete_underlying_host = flags.OPTIONS.delete_hosts
+ for cluster in clusters:
+ cluster_id = cluster['id']
+ cluster_api.del_cluster(
+ cluster_id, True, False, delete_underlying_host, user=user
+ )
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ database.init()
+ delete_clusters()
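+# A hypothetical invocation, deleting two clusters and their underlying
+# hosts (boolean flags here follow the gflags style used elsewhere in
+# these scripts, cf. the --noasync flag in refresh_agent.sh):
+#   ./delete_clusters.py --clusternames=cluster1,cluster2 --delete_hosts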
diff --git a/compass-deck/bin/manage_db.py b/compass-deck/bin/manage_db.py
new file mode 100755
index 0000000..3e56433
--- /dev/null
+++ b/compass-deck/bin/manage_db.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""utility binary to manage database."""
+import os
+import os.path
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+from flask.ext.script import Manager
+
+from compass.api import app
+from compass.db.api import database
+from compass.db.api import switch as switch_api
+from compass.db.api import user as user_api
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+flags.add('table_name',
+ help='table name',
+ default='')
+flags.add('clusters',
+ help=(
+              'clusters and hosts of each cluster; the format is '
+              'clusterid:hostname1,hostname2,...;...'),
+ default='')
+flags.add_bool('async',
+               help='run in async mode',
+ default=True)
+flags.add('switch_machines_file',
+ help=(
+              'file of switches and machines '
+              'connected to each switch. Each line in the file '
+ 'is machine,<switch ip>,<switch port>,<vlan>,<mac> '
+ 'or switch,<switch_ip>,<switch_vendor>,'
+ '<switch_version>,<switch_community>,<switch_state>'),
+ default='')
+flags.add('search_cluster_properties',
+ help='comma separated properties to search in cluster config',
+ default='')
+flags.add('print_cluster_properties',
+ help='comma separated cluster config properties to print',
+ default='')
+flags.add('search_host_properties',
+ help='comma separated properties to search in host config',
+ default='')
+flags.add('print_host_properties',
+ help='comma separated host config properties to print',
+ default='')
+
+
+app_manager = Manager(app, usage="Perform database operations")
+
+
+TABLE_MAPPING = {
+}
+
+
+@app_manager.command
+def list_config():
+ "List the commands."
+ for key, value in app.config.items():
+ print key, value
+
+
+@app_manager.command
+def checkdb():
+ """check if db exists."""
+ if setting.DATABASE_TYPE == 'file':
+ if os.path.exists(setting.DATABASE_FILE):
+ sys.exit(0)
+ else:
+ sys.exit(1)
+
+ sys.exit(0)
+
+
+@app_manager.command
+def createdb():
+ """Creates database from sqlalchemy models."""
+ database.init()
+ try:
+ database.drop_db()
+ except Exception:
+ pass
+
+ if setting.DATABASE_TYPE == 'file':
+ if os.path.exists(setting.DATABASE_FILE):
+ os.remove(setting.DATABASE_FILE)
+ database.create_db()
+ if setting.DATABASE_TYPE == 'file':
+ os.chmod(setting.DATABASE_FILE, 0o777)
+
+
+@app_manager.command
+def dropdb():
+ """Drops database from sqlalchemy models."""
+ database.init()
+ database.drop_db()
+
+
+@app_manager.command
+def set_switch_machines():
+ """Set switches and machines.
+
+    .. note::
+       --switch_machines_file is the file which stores all switch
+       and machine information. Each line in the file represents
+       one machine or one switch. The format of each line is
+       machine,<switch_ip>,<switch_port>,<vlan>,<mac>
+       or switch,<switch_ip>,<switch_vendor>,<switch_version>,
+       <switch_community>,<switch_state>
+ """
+ if not flags.OPTIONS.switch_machines_file:
+ print 'flag --switch_machines_file is missing'
+ return
+ database.init()
+ switches, switch_machines = util.get_switch_machines_from_file(
+ flags.OPTIONS.switch_machines_file)
+ user = user_api.get_user_object(
+ setting.COMPASS_ADMIN_EMAIL
+ )
+ switch_mapping = {}
+ for switch in switches:
+ added_switch = switch_api.add_switch(
+ False, user=user, **switch
+ )
+ switch_mapping[switch['ip']] = added_switch['id']
+ for switch_ip, machines in switch_machines.items():
+ if switch_ip not in switch_mapping:
+ print 'switch ip %s not found' % switch_ip
+ sys.exit(1)
+ switch_id = switch_mapping[switch_ip]
+ for machine in machines:
+ switch_api.add_switch_machine(
+ switch_id, False, user=user, **machine
+ )
+
+
+if __name__ == "__main__":
+ flags.init()
+ logsetting.init()
+ app_manager.run()
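+# Hypothetical invocations through Flask-Script's Manager, using the
+# commands defined above (refresh_server.sh runs 'createdb' this way):
+#   ./manage_db.py createdb
+#   ./manage_db.py checkdb
+# set_switch_machines additionally expects the --switch_machines_file
+# flag to be set.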
diff --git a/compass-deck/bin/poll_switch.py b/compass-deck/bin/poll_switch.py
new file mode 100755
index 0000000..c61e1dd
--- /dev/null
+++ b/compass-deck/bin/poll_switch.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""main script to poll machines which is connected to the switches."""
+import functools
+import logging
+import os
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+import lockfile
+from multiprocessing import Pool
+
+from compass.actions import poll_switch
+from compass.actions import util
+from compass.db.api import database
+from compass.db.api import switch as switch_api
+from compass.db.api import user as user_api
+from compass.tasks.client import celery
+from compass.utils import daemonize
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+flags.add('switch_ips',
+          help='comma separated switch ips',
+ default='')
+flags.add_bool('async',
+               help='run in async mode',
+ default=True)
+flags.add('thread_pool_size', type='int',
+          help='thread pool size when running in non-async mode',
+ default=4)
+flags.add('run_interval', type='int',
+ help='run interval in seconds',
+ default=setting.POLLSWITCH_INTERVAL)
+
+
+def pollswitches(switch_ips):
+ """poll switch."""
+ user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
+    poll_switches = {}
+ all_switches = dict([
+ (switch['ip'], switch['credentials'])
+ for switch in switch_api.list_switches(user=user)
+ ])
+ if switch_ips:
+ poll_switches = dict([
+ (switch_ip, all_switches[switch_ip])
+ for switch_ip in switch_ips
+ if switch_ip in all_switches
+ ])
+ else:
+ poll_switches = all_switches
+
+ if flags.OPTIONS.async:
+ for switch_ip, switch_credentials in poll_switches.items():
+ celery.send_task(
+ 'compass.tasks.pollswitch',
+ (user.email, switch_ip, switch_credentials)
+ )
+
+ else:
+ try:
+ pool = Pool(processes=flags.OPTIONS.thread_pool_size)
+ for switch_ip, switch_credentials in poll_switches.items():
+ pool.apply_async(
+ poll_switch.poll_switch,
+ (user.email, switch_ip, switch_credentials)
+ )
+ pool.close()
+ pool.join()
+ except Exception as error:
+ logging.error('failed to poll switches %s',
+ poll_switches)
+ logging.exception(error)
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ database.init()
+ logging.info('run poll_switch')
+ daemonize.daemonize(
+ functools.partial(
+ pollswitches,
+ [switch_ip
+ for switch_ip in flags.OPTIONS.switch_ips.split(',')
+ if switch_ip]),
+ flags.OPTIONS.run_interval,
+ pidfile=lockfile.FileLock('/var/run/poll_switch.pid'),
+ stderr=open('/tmp/poll_switch_err.log', 'w+'),
+ stdout=open('/tmp/poll_switch_out.log', 'w+'))
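+# A hypothetical invocation, polling two switches in async mode (the
+# default), with tasks dispatched to celery on each run interval:
+#   ./poll_switch.py --switch_ips=10.145.8.10,10.145.8.11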
diff --git a/compass-deck/bin/progress_update.py b/compass-deck/bin/progress_update.py
new file mode 100755
index 0000000..cc8c12b
--- /dev/null
+++ b/compass-deck/bin/progress_update.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""main script to run as service to update hosts installing progress."""
+import functools
+import logging
+import os
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+import lockfile
+
+from compass.actions import update_progress
+from compass.db.api import database
+from compass.tasks.client import celery
+from compass.utils import daemonize
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+flags.add_bool('async',
+ help='run in async mode',
+ default=True)
+flags.add('run_interval', type='int',
+ help='run interval in seconds',
+ default=setting.PROGRESS_UPDATE_INTERVAL)
+
+
+def progress_update():
+ """entry function."""
+ if flags.OPTIONS.async:
+ celery.send_task('compass.tasks.update_progress', ())
+ else:
+ try:
+ update_progress.update_progress()
+ except Exception as error:
+ logging.error('failed to update progress')
+ logging.exception(error)
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ database.init()
+ logging.info('run progress update')
+ daemonize.daemonize(
+ progress_update,
+ flags.OPTIONS.run_interval,
+ pidfile=lockfile.FileLock('/var/run/progress_update.pid'),
+ stderr=open('/tmp/progress_update_err.log', 'w+'),
+ stdout=open('/tmp/progress_update_out.log', 'w+'))
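+# Runs as a daemon; a hypothetical invocation that updates progress
+# synchronously every 60 seconds instead of dispatching celery tasks:
+#   ./progress_update.py --noasync --run_interval=60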
diff --git a/compass-deck/bin/query_switch.py b/compass-deck/bin/query_switch.py
new file mode 100755
index 0000000..4b4b2cd
--- /dev/null
+++ b/compass-deck/bin/query_switch.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""query switch."""
+import optparse
+import Queue
+import threading
+import time
+
+from compass.apiclient.restful import Client
+
+
+class AddSwitch(object):
+ """A utility class.
+
+ Handles adding a switch and retrieving corresponding machines
+ associated with the switch.
+ """
+
+ def __init__(self, server_url):
+ print server_url, " ...."
+ self._client = Client(server_url)
+
+ def add_switch(self, queue, ip, snmp_community):
+ """Add a switch with SNMP credentials.
+
+ :param queue: The result holder for the machine details.
+        :type queue: A Queue object (thread-safe).
+ :param ip: The IP address of the switch.
+ :type ip: string.
+ :param snmp_community: The SNMP community string.
+ :type snmp_community: string.
+ """
+ status, resp = self._client.add_switch(ip,
+ version="2c",
+ community=snmp_community)
+ if status > 409:
+ queue.put((ip, (False,
+ "Failed to add the switch (status=%d)" % status)))
+ return
+
+ if status == 409:
+ # This is the case where the switch with the same IP already
+ # exists in the system. We now try to update the switch
+ # with the given credential.
+ switch_id = resp['failedSwitch']
+ status, resp = self._client.update_switch(switch_id,
+ version="2c",
+ community=snmp_community)
+ if status > 202:
+ queue.put((ip, (False,
+ "Failed to update the switch (status=%d)" %
+ status)))
+ return
+
+ switch = resp['switch']
+ state = switch['state']
+ switch_id = switch['id']
+
+ # if the switch state is not in under_monitoring,
+ # wait for the poll switch task
+ while True:
+ status, resp = self._client.get_switch(switch_id)
+ if status > 400:
+ queue.put((ip, (False, "Failed to get switch status")))
+ return
+
+ switch = resp['switch']
+
+ state = switch['state']
+ if state == 'initialized' or state == 'repolling':
+ time.sleep(5)
+ else:
+ break
+
+ if state == 'under_monitoring':
+ # get machines connected to the switch.
+ status, response = self._client.get_machines(switch_id=switch_id)
+ if status == 200:
+ for machine in response['machines']:
+ queue.put((ip, "mac=%s, vlan=%s, port=%s dbid=%d" % (
+ machine['mac'],
+ machine['vlan'],
+ machine['port'],
+ machine['id'])))
+ else:
+ queue.put((ip, (False,
+ "Failed to get machines %s" %
+ response['status'])))
+ else:
+ queue.put((ip, (False, "Switch state is %s" % state)))
+
+if __name__ == "__main__":
+ usage = "usage: %prog [options] switch_ips"
+ parser = optparse.OptionParser(usage)
+
+ parser.add_option("-u", "--server-url", dest="server_url",
+ default="http://localhost/api",
+ help="The Compass Server URL")
+
+ parser.add_option("-c", "--community", dest="community",
+ default="public",
+ help="Switch SNMP community string")
+
+ (options, args) = parser.parse_args()
+
+ if len(args) != 1:
+ parser.error("Wrong number of arguments")
+
+ threads = []
+ queue = Queue.Queue()
+ add_switch = AddSwitch(options.server_url)
+
+ print "Add switch to the server. This may take a while ..."
+ for switch in args[0].split(','):
+ t = threading.Thread(target=add_switch.add_switch,
+ args=(queue, switch, options.community))
+
+ threads.append(t)
+ t.start()
+
+ for t in threads:
+ t.join(60)
+
+ while True:
+ try:
+ ip, result = queue.get(block=False)
+ print ip, " : ", result
+ except Queue.Empty:
+ break
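+# A hypothetical invocation, querying two switches with the default
+# 'public' SNMP community string:
+#   ./query_switch.py -u http://localhost/api 10.145.8.10,10.145.8.11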
diff --git a/compass-deck/bin/refresh.sh b/compass-deck/bin/refresh.sh
new file mode 100755
index 0000000..d867440
--- /dev/null
+++ b/compass-deck/bin/refresh.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+/opt/compass/bin/refresh_agent.sh
+/opt/compass/bin/refresh_server.sh
diff --git a/compass-deck/bin/refresh_agent.sh b/compass-deck/bin/refresh_agent.sh
new file mode 100755
index 0000000..13c3050
--- /dev/null
+++ b/compass-deck/bin/refresh_agent.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+# systemctl restart mysql.service
+# systemctl status mysql.service || exit $?
+# /opt/compass/bin/manage_db.py createdb
+/opt/compass/bin/clean_installers.py --noasync
+/opt/compass/bin/clean_installation_logs.py
+rm -rf /var/ansible/run/*
+# systemctl restart httpd.service
+# systemctl status httpd.service || exit $?
+systemctl restart rsyslog.service
+systemctl status rsyslog.service || exit $?
+systemctl restart redis.service
+systemctl status redis.service || exit $?
+redis-cli flushall
+systemctl restart cobblerd.service
+systemctl status cobblerd.service || exit $?
+systemctl restart compass-celeryd.service
+systemctl status compass-celeryd.service || exit $?
+# systemctl restart compass-progress-updated.service
+# systemctl status compass-progress-updated.service || exit $?
+
diff --git a/compass-deck/bin/refresh_server.sh b/compass-deck/bin/refresh_server.sh
new file mode 100755
index 0000000..a93204a
--- /dev/null
+++ b/compass-deck/bin/refresh_server.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+systemctl restart mysql.service
+systemctl status mysql.service || exit $?
+/opt/compass/bin/manage_db.py createdb
+# /opt/compass/bin/clean_installers.py --noasync
+# /opt/compass/bin/clean_installation_logs.py
+# rm -rf /var/ansible/run/*
+systemctl restart httpd.service
+systemctl status httpd.service || exit $?
+systemctl restart rsyslog.service
+systemctl status rsyslog.service || exit $?
+systemctl restart redis.service
+systemctl status redis.service || exit $?
+redis-cli flushall
+# systemctl restart cobblerd.service
+# systemctl status cobblerd.service || exit $?
+# systemctl restart compass-celeryd.service
+# systemctl status compass-celeryd.service || exit $?
+# systemctl restart compass-progress-updated.service
+# systemctl status compass-progress-updated.service || exit $?
+
diff --git a/compass-deck/bin/runserver.py b/compass-deck/bin/runserver.py
new file mode 100755
index 0000000..b8b1a72
--- /dev/null
+++ b/compass-deck/bin/runserver.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""main script to start an instance of compass server ."""
+import logging
+
+from compass.api import app
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('server_host',
+ help='server host address',
+ default='0.0.0.0')
+flags.add_bool('debug',
+ help='run in debug mode',
+ default=True)
+
+
+if __name__ == '__main__':
+ flags.init()
+ logsetting.init()
+ logging.info('run server')
+ app.run(host=flags.OPTIONS.server_host, debug=flags.OPTIONS.debug)
diff --git a/compass-deck/bin/switch_virtualenv.py b/compass-deck/bin/switch_virtualenv.py
new file mode 100755
index 0000000..ca843eb
--- /dev/null
+++ b/compass-deck/bin/switch_virtualenv.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""utility switch to virtual env."""
+import os
+import os.path
+import site
+import sys
+
+
+virtual_env = '/root/.virtualenvs/compass-core'
+activate_this = '%s/bin/activate_this.py' % virtual_env
+execfile(activate_this, dict(__file__=activate_this))
+site.addsitedir('%s/lib/python2.6/site-packages' % virtual_env)
+if virtual_env not in sys.path:
+ sys.path.append(virtual_env)
+os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
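+
+# Scripts under bin/ append their own directory to sys.path and then
+# 'import switch_virtualenv' before any compass imports, so that the
+# compass modules resolve inside the compass-core virtualenv above.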
diff --git a/compass-deck/build.sh b/compass-deck/build.sh
new file mode 100755
index 0000000..81ed6ff
--- /dev/null
+++ b/compass-deck/build.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2016-2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -x
+COMPASS_DIR=${BASH_SOURCE[0]%/*}
+
+yum update -y
+
+yum --nogpgcheck install -y which python python-devel git wget syslinux amqp mod_wsgi httpd bind rsync yum-utils gcc unzip openssl openssl098e ca-certificates mysql-devel mysql MySQL-python python-virtualenv python-setuptools python-pip bc libselinux-python libffi-devel openssl-devel vim net-tools
+
+git clone git://git.openstack.org/openstack/compass-web $COMPASS_DIR/../compass-web/
+rm -rf $COMPASS_DIR/../compass-web/.git
+
+easy_install --upgrade pip
+easy_install --upgrade six
+pip install --upgrade pip
+pip install --upgrade setuptools
+pip install --upgrade virtualenv
+pip install --upgrade redis
+pip install --upgrade virtualenvwrapper
+
+source `which virtualenvwrapper.sh`
+mkvirtualenv --system-site-packages compass-core
+workon compass-core
+cd /root/compass-deck
+pip install -U -r requirements.txt
+cd -
+
+systemctl enable httpd
+mkdir -p /var/log/httpd
+chmod -R 777 /var/log/httpd
+mkdir -p /var/www/compass_web/v2.5
+cp -rf $COMPASS_DIR/../compass-web/v2.5/target/* /var/www/compass_web/v2.5/
+
+echo "ServerName compass-deck:80" >> /etc/httpd/conf/httpd.conf
+mkdir -p /opt/compass/bin
+mkdir -p /opt/compass/db
+cp -f $COMPASS_DIR/misc/apache/{ods-server.conf,http_pip.conf,images.conf,packages.conf} \
+/etc/httpd/conf.d/
+cp -rf $COMPASS_DIR/bin/* /opt/compass/bin/
+mkdir -p /var/www/compass
+ln -s -f /opt/compass/bin/compass_wsgi.py /var/www/compass/compass.wsgi
+cp -rf /usr/lib64/libcrypto.so.6 /usr/lib64/libcrypto.so
+
+mkdir -p /var/log/compass
+chmod -R 777 /var/log/compass
+chmod -R 777 /opt/compass/db
+mkdir -p $COMPASS_DIR/compass
+mv $COMPASS_DIR/{actions,api,apiclient,utils,db,tasks,deployment} $COMPASS_DIR/compass/
+touch $COMPASS_DIR/compass/__init__.py
+source `which virtualenvwrapper.sh`
+workon compass-core
+cd /root/compass-deck
+python setup.py install
+usermod -a -G root apache
+
+yum clean all
+
+cp $COMPASS_DIR/start.sh /usr/local/bin/start.sh
+set +x
diff --git a/compass-deck/db/__init__.py b/compass-deck/db/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-deck/db/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/db/api/__init__.py b/compass-deck/db/api/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-deck/db/api/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/db/api/adapter.py b/compass-deck/db/api/adapter.py
new file mode 100644
index 0000000..c3ad48d
--- /dev/null
+++ b/compass-deck/db/api/adapter.py
@@ -0,0 +1,313 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Adapter related database operations."""
+import logging
+import re
+
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+OSES = None
+OS_INSTALLERS = None
+PACKAGE_INSTALLERS = None
+ADAPTERS = None
+ADAPTERS_FLAVORS = None
+ADAPTERS_ROLES = None
+
+
+def _get_oses_from_configuration():
+ """Get all os configs from os configuration dir.
+
+    Example: {
+        <os_name>: {
+            'name': <os_name>,
+            'id': <os_name>,
+            'os_id': <os_name>,
+            'parent': <parent_os_name>,
+            'parent_id': <parent_os_name>,
+            'deployable': True
+        }
+    }
+ """
+ configs = util.load_configs(setting.OS_DIR)
+ systems = {}
+ for config in configs:
+ logging.info('get config %s', config)
+ system_name = config['NAME']
+ parent_name = config.get('PARENT', None)
+ system = {
+ 'name': system_name,
+ 'id': system_name,
+ 'os_id': system_name,
+ 'parent': parent_name,
+ 'parent_id': parent_name,
+ 'deployable': config.get('DEPLOYABLE', False)
+ }
+ systems[system_name] = system
+ parents = {}
+ for name, system in systems.items():
+ parent = system.get('parent', None)
+ parents[name] = parent
+ for name, system in systems.items():
+ util.recursive_merge_dict(name, systems, parents)
+ return systems
+
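+# For illustration (hypothetical os names): given systems
+#   {'CentOS': {...}, 'CentOS-7': {'parent': 'CentOS', ...}}
+# and parents {'CentOS': None, 'CentOS-7': 'CentOS'},
+# util.recursive_merge_dict is expected to fill 'CentOS-7' with any
+# fields it does not itself set from its 'CentOS' parent.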
+
+def _get_installers_from_configuration(configs):
+ """Get installers from configurations.
+
+ Example: {
+        <installer_instance>: {
+ 'alias': <instance_name>,
+ 'id': <instance_name>,
+ 'name': <name>,
+ 'settings': <dict pass to installer plugin>
+ }
+ }
+ """
+ installers = {}
+ for config in configs:
+ name = config['NAME']
+ instance_name = config.get('INSTANCE_NAME', name)
+ installers[instance_name] = {
+ 'alias': instance_name,
+ 'id': instance_name,
+ 'name': name,
+ 'settings': config.get('SETTINGS', {})
+ }
+ return installers
+
+
+def _get_os_installers_from_configuration():
+ """Get os installers from os installer config dir."""
+ configs = util.load_configs(setting.OS_INSTALLER_DIR)
+ return _get_installers_from_configuration(configs)
+
+
+def _get_package_installers_from_configuration():
+ """Get package installers from package installer config dir."""
+ configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
+ return _get_installers_from_configuration(configs)
+
+
+def _get_adapters_from_configuration():
+ """Get adapters from adapter config dir."""
+ configs = util.load_configs(setting.ADAPTER_DIR)
+ adapters = {}
+ for config in configs:
+ logging.info('add config %s to adapter', config)
+ if 'OS_INSTALLER' in config:
+ os_installer = OS_INSTALLERS[config['OS_INSTALLER']]
+ else:
+ os_installer = None
+
+ if 'PACKAGE_INSTALLER' in config:
+ package_installer = PACKAGE_INSTALLERS[
+ config['PACKAGE_INSTALLER']
+ ]
+ else:
+ package_installer = None
+
+ adapter_name = config['NAME']
+ parent_name = config.get('PARENT', None)
+ adapter = {
+ 'name': adapter_name,
+ 'id': adapter_name,
+ 'parent': parent_name,
+ 'parent_id': parent_name,
+ 'display_name': config.get('DISPLAY_NAME', adapter_name),
+ 'os_installer': os_installer,
+ 'package_installer': package_installer,
+ 'deployable': config.get('DEPLOYABLE', False),
+ 'health_check_cmd': config.get('HEALTH_CHECK_COMMAND', None),
+ 'supported_oses': [],
+ 'roles': [],
+ 'flavors': []
+ }
+ supported_os_patterns = [
+ re.compile(supported_os_pattern)
+ for supported_os_pattern in config.get('SUPPORTED_OS_PATTERNS', [])
+ ]
+ for os_name, os in OSES.items():
+ if not os.get('deployable', False):
+ continue
+ for supported_os_pattern in supported_os_patterns:
+ if supported_os_pattern.match(os_name):
+ adapter['supported_oses'].append(os)
+ break
+ adapters[adapter_name] = adapter
+
+ parents = {}
+ for name, adapter in adapters.items():
+ parent = adapter.get('parent', None)
+ parents[name] = parent
+ for name, adapter in adapters.items():
+ util.recursive_merge_dict(name, adapters, parents)
+ return adapters
+
+
+def _add_roles_from_configuration():
+ """Get roles from roles config dir and update to adapters."""
+ configs = util.load_configs(setting.ADAPTER_ROLE_DIR)
+ for config in configs:
+ logging.info(
+ 'add config %s to role', config
+ )
+ adapter_name = config['ADAPTER_NAME']
+ adapter = ADAPTERS[adapter_name]
+ adapter_roles = ADAPTERS_ROLES.setdefault(adapter_name, {})
+ for role_dict in config['ROLES']:
+ role_name = role_dict['role']
+ display_name = role_dict.get('display_name', role_name)
+ adapter_roles[role_name] = {
+ 'name': role_name,
+ 'id': '%s:%s' % (adapter_name, role_name),
+ 'adapter_id': adapter_name,
+ 'adapter_name': adapter_name,
+ 'display_name': display_name,
+ 'description': role_dict.get('description', display_name),
+ 'optional': role_dict.get('optional', False)
+ }
+ parents = {}
+ for name, adapter in ADAPTERS.items():
+ parent = adapter.get('parent', None)
+ parents[name] = parent
+ for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
+ util.recursive_merge_dict(adapter_name, ADAPTERS_ROLES, parents)
+ for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
+ adapter = ADAPTERS[adapter_name]
+ adapter['roles'] = adapter_roles.values()
+
+
+def _add_flavors_from_configuration():
+ """Get flavors from flavor config dir and update to adapters."""
+ configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR)
+ for config in configs:
+ logging.info('add config %s to flavor', config)
+ adapter_name = config['ADAPTER_NAME']
+ adapter = ADAPTERS[adapter_name]
+ adapter_flavors = ADAPTERS_FLAVORS.setdefault(adapter_name, {})
+ adapter_roles = ADAPTERS_ROLES[adapter_name]
+ for flavor_dict in config['FLAVORS']:
+ flavor_name = flavor_dict['flavor']
+ flavor_id = '%s:%s' % (adapter_name, flavor_name)
+ flavor = {
+ 'name': flavor_name,
+ 'id': flavor_id,
+ 'adapter_id': adapter_name,
+ 'adapter_name': adapter_name,
+ 'display_name': flavor_dict.get('display_name', flavor_name),
+ 'template': flavor_dict.get('template', None)
+ }
+ flavor_roles = flavor_dict.get('roles', [])
+ roles_in_flavor = []
+ for flavor_role in flavor_roles:
+ if isinstance(flavor_role, basestring):
+ role_name = flavor_role
+ role_in_flavor = {
+ 'name': role_name,
+ 'flavor_id': flavor_id
+ }
+ else:
+ role_in_flavor = flavor_role
+ role_in_flavor['flavor_id'] = flavor_id
+ if 'role' in role_in_flavor:
+ role_in_flavor['name'] = role_in_flavor['role']
+ del role_in_flavor['role']
+ role_name = role_in_flavor['name']
+ role = adapter_roles[role_name]
+ util.merge_dict(role_in_flavor, role, override=False)
+ roles_in_flavor.append(role_in_flavor)
+ flavor['roles'] = roles_in_flavor
+ adapter_flavors[flavor_name] = flavor
+ parents = {}
+ for name, adapter in ADAPTERS.items():
+ parent = adapter.get('parent', None)
+ parents[name] = parent
+ for adapter_name, adapter_roles in ADAPTERS_FLAVORS.items():
+ util.recursive_merge_dict(adapter_name, ADAPTERS_FLAVORS, parents)
+ for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
+ adapter = ADAPTERS[adapter_name]
+ adapter['flavors'] = adapter_flavors.values()
+
+
+def load_adapters_internal(force_reload=False):
+ """Load adapter related configurations into memory.
+
+    If force_reload, reload all configurations even if they are loaded already.
+ """
+ global OSES
+ if force_reload or OSES is None:
+ OSES = _get_oses_from_configuration()
+ global OS_INSTALLERS
+ if force_reload or OS_INSTALLERS is None:
+ OS_INSTALLERS = _get_os_installers_from_configuration()
+ global PACKAGE_INSTALLERS
+ if force_reload or PACKAGE_INSTALLERS is None:
+ PACKAGE_INSTALLERS = _get_package_installers_from_configuration()
+ global ADAPTERS
+ if force_reload or ADAPTERS is None:
+ ADAPTERS = _get_adapters_from_configuration()
+ global ADAPTERS_ROLES
+ if force_reload or ADAPTERS_ROLES is None:
+ ADAPTERS_ROLES = {}
+ _add_roles_from_configuration()
+ global ADAPTERS_FLAVORS
+ if force_reload or ADAPTERS_FLAVORS is None:
+ ADAPTERS_FLAVORS = {}
+ _add_flavors_from_configuration()
+
+
+def get_adapters_internal(force_reload=False):
+ """Get all deployable adapters."""
+ load_adapters_internal(force_reload=force_reload)
+ adapter_mapping = {}
+ for adapter_name, adapter in ADAPTERS.items():
+ if adapter.get('deployable'):
+ # TODO(xicheng): adapter should be filtered before
+ # return to caller.
+ adapter_mapping[adapter_name] = adapter
+ else:
+ logging.info(
+ 'ignore adapter %s since it is not deployable',
+ adapter_name
+ )
+ return adapter_mapping
+
+
+def get_flavors_internal(force_reload=False):
+ """Get all deployable flavors."""
+ load_adapters_internal(force_reload=force_reload)
+ adapter_flavor_mapping = {}
+ for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
+ adapter = ADAPTERS.get(adapter_name, {})
+ for flavor_name, flavor in adapter_flavors.items():
+ if adapter.get('deployable'):
+ # TODO(xicheng): flavor dict should be filtered before
+ # return to caller.
+ adapter_flavor_mapping.setdefault(
+ adapter_name, {}
+ )[flavor_name] = flavor
+ else:
+ logging.info(
+ 'ignore adapter %s since it is not deployable',
+ adapter_name
+ )
+
+ return adapter_flavor_mapping
diff --git a/compass-deck/db/api/adapter_holder.py b/compass-deck/db/api/adapter_holder.py
new file mode 100644
index 0000000..91c65c4
--- /dev/null
+++ b/compass-deck/db/api/adapter_holder.py
@@ -0,0 +1,155 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Adapter related object holder."""
+import logging
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+
+
+SUPPORTED_FIELDS = [
+ 'name',
+]
+RESP_FIELDS = [
+ 'id', 'name', 'roles', 'flavors',
+ 'os_installer', 'package_installer',
+ 'supported_oses', 'display_name', 'health_check_cmd'
+]
+RESP_OS_FIELDS = [
+ 'id', 'name', 'os_id'
+]
+RESP_ROLES_FIELDS = [
+ 'id', 'name', 'display_name', 'description', 'optional'
+]
+RESP_FLAVORS_FIELDS = [
+ 'id', 'adapter_id', 'adapter_name', 'name', 'display_name',
+ 'template', 'roles'
+]
+
+
+ADAPTER_MAPPING = None
+FLAVOR_MAPPING = None
+
+
+def load_adapters(force_reload=False):
+ global ADAPTER_MAPPING
+ if force_reload or ADAPTER_MAPPING is None:
+ logging.info('load adapters into memory')
+ ADAPTER_MAPPING = adapter_api.get_adapters_internal(
+ force_reload=force_reload
+ )
+
+
+def load_flavors(force_reload=False):
+ global FLAVOR_MAPPING
+ if force_reload or FLAVOR_MAPPING is None:
+ logging.info('load flavors into memory')
+ FLAVOR_MAPPING = {}
+ adapters_flavors = adapter_api.get_flavors_internal(
+ force_reload=force_reload
+ )
+ for adapter_name, adapter_flavors in adapters_flavors.items():
+ for flavor_name, flavor in adapter_flavors.items():
+ FLAVOR_MAPPING['%s:%s' % (adapter_name, flavor_name)] = flavor
+
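+# Keys in FLAVOR_MAPPING follow '<adapter_name>:<flavor_name>'; a
+# hypothetical adapter 'openstack' with flavor 'HA' would be stored
+# under the key 'openstack:HA'.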
+
+def _filter_adapters(adapter_config, filter_name, filter_value):
+ if filter_name not in adapter_config:
+ return False
+ if isinstance(filter_value, list):
+ return bool(
+ adapter_config[filter_name] in filter_value
+ )
+ elif isinstance(filter_value, dict):
+ return all([
+ _filter_adapters(
+ adapter_config[filter_name],
+ sub_filter_key, sub_filter_value
+ )
+ for sub_filter_key, sub_filter_value in filter_value.items()
+ ])
+ else:
+ return adapter_config[filter_name] == filter_value
+
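+# For illustration: _filter_adapters({'name': 'ansible'}, 'name',
+# ['ansible', 'chef']) returns True (a list filter is a membership
+# test), while _filter_adapters({'name': 'ansible'}, 'name', 'chef')
+# returns False (a scalar filter is an equality test).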
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_ADAPTERS
+)
+@utils.output_filters(name=utils.general_filter_callback)
+@utils.wrap_to_dict(
+ RESP_FIELDS,
+ supported_oses=RESP_OS_FIELDS,
+ roles=RESP_ROLES_FIELDS,
+ flavors=RESP_FLAVORS_FIELDS
+)
+def list_adapters(user=None, session=None, **filters):
+ """list adapters."""
+ load_adapters()
+ return ADAPTER_MAPPING.values()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_ADAPTERS
+)
+@utils.wrap_to_dict(
+ RESP_FIELDS,
+ supported_oses=RESP_OS_FIELDS,
+ roles=RESP_ROLES_FIELDS,
+ flavors=RESP_FLAVORS_FIELDS
+)
+def get_adapter(adapter_id, user=None, session=None, **kwargs):
+ """get adapter."""
+ load_adapters()
+ if adapter_id not in ADAPTER_MAPPING:
+ raise exception.RecordNotExists(
+            'adapter %s does not exist' % adapter_id
+ )
+ return ADAPTER_MAPPING[adapter_id]
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
+def list_flavors(user=None, session=None, **filters):
+ """List flavors."""
+ load_flavors()
+ return FLAVOR_MAPPING.values()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
+def get_flavor(flavor_id, user=None, session=None, **kwargs):
+ """Get flavor."""
+ load_flavors()
+ if flavor_id not in FLAVOR_MAPPING:
+ raise exception.RecordNotExists(
+ 'flavor %s does not exist' % flavor_id
+ )
+ return FLAVOR_MAPPING[flavor_id]
diff --git a/compass-deck/db/api/cluster.py b/compass-deck/db/api/cluster.py
new file mode 100644
index 0000000..7a7022c
--- /dev/null
+++ b/compass-deck/db/api/cluster.py
@@ -0,0 +1,2444 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cluster database operations."""
+import copy
+import functools
+import logging
+import re
+
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = [
+ 'name', 'os_name', 'owner',
+ 'adapter_name', 'flavor_name'
+]
+SUPPORTED_CLUSTERHOST_FIELDS = []
+RESP_FIELDS = [
+ 'id', 'name', 'os_name', 'os_id', 'adapter_id', 'flavor_id',
+ 'reinstall_distributed_system', 'flavor',
+ 'distributed_system_installed',
+ 'owner', 'adapter_name', 'flavor_name',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTERHOST_FIELDS = [
+ 'id', 'host_id', 'clusterhost_id', 'machine_id',
+ 'name', 'hostname', 'roles', 'os_installer',
+ 'cluster_id', 'clustername', 'location', 'tag',
+ 'networks', 'mac', 'switch_ip', 'port', 'switches',
+ 'os_installed', 'distributed_system_installed',
+ 'os_name', 'os_id', 'ip',
+ 'reinstall_os', 'reinstall_distributed_system',
+ 'owner', 'cluster_id',
+ 'created_at', 'updated_at',
+ 'patched_roles'
+]
+RESP_CONFIG_FIELDS = [
+ 'os_config',
+ 'package_config',
+ 'config_step',
+ 'config_validated',
+ 'created_at',
+ 'updated_at'
+]
+RESP_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config',
+ 'created_at',
+ 'updated_at'
+]
+RESP_METADATA_FIELDS = [
+ 'os_config', 'package_config'
+]
+RESP_CLUSTERHOST_CONFIG_FIELDS = [
+ 'package_config',
+ 'os_config',
+ 'config_step',
+ 'config_validated',
+ 'networks',
+ 'created_at',
+ 'updated_at'
+]
+RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config',
+ 'created_at',
+ 'updated_at'
+]
+RESP_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity',
+ 'status', 'ready',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTERHOST_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity',
+ 'ready', 'created_at', 'updated_at'
+]
+RESP_REVIEW_FIELDS = [
+ 'cluster', 'hosts'
+]
+RESP_DEPLOY_FIELDS = [
+ 'status', 'cluster', 'hosts'
+]
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+ADDED_FIELDS = ['name', 'adapter_id', 'os_id']
+OPTIONAL_ADDED_FIELDS = ['flavor_id']
+UPDATED_FIELDS = ['name', 'reinstall_distributed_system']
+ADDED_HOST_FIELDS = ['machine_id']
+UPDATED_HOST_FIELDS = ['name', 'reinstall_os']
+UPDATED_CLUSTERHOST_FIELDS = ['roles', 'patched_roles']
+PATCHED_CLUSTERHOST_FIELDS = ['patched_roles']
+UPDATED_CONFIG_FIELDS = [
+ 'put_os_config', 'put_package_config', 'config_step'
+]
+UPDATED_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config', 'deployed_package_config'
+]
+PATCHED_CONFIG_FIELDS = [
+ 'patched_os_config', 'patched_package_config', 'config_step'
+]
+UPDATED_CLUSTERHOST_CONFIG_FIELDS = [
+ 'put_os_config',
+ 'put_package_config'
+]
+PATCHED_CLUSTERHOST_CONFIG_FIELDS = [
+ 'patched_os_config',
+ 'patched_package_config'
+]
+UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config'
+]
+UPDATED_CLUSTERHOST_STATE_FIELDS = [
+ 'state', 'percentage', 'message', 'severity'
+]
+UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS = [
+ 'ready'
+]
+UPDATED_CLUSTER_STATE_FIELDS = ['state']
+IGNORE_UPDATED_CLUSTER_STATE_FIELDS = ['percentage', 'message', 'severity']
+UPDATED_CLUSTER_STATE_INTERNAL_FIELDS = ['ready']
+RESP_CLUSTERHOST_LOG_FIELDS = [
+ 'clusterhost_id', 'id', 'host_id', 'cluster_id',
+ 'filename', 'position', 'partial_line',
+ 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+ADDED_CLUSTERHOST_LOG_FIELDS = [
+ 'filename'
+]
+UPDATED_CLUSTERHOST_LOG_FIELDS = [
+ 'position', 'partial_line', 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_clusters(user=None, session=None, **filters):
+ """List clusters."""
+ clusters = utils.list_db_objects(
+ session, models.Cluster, **filters
+ )
+ logging.info('user is %s', user.email)
+    if not user.is_admin and clusters:
+ clusters = [c for c in clusters if c.owner == user.email]
+ return clusters
+
+
+def _get_cluster(cluster_id, session=None, **kwargs):
+ """Get cluster by id."""
+ if isinstance(cluster_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Cluster, id=cluster_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'cluster id %s type is not int compatible' % cluster_id
+ )
+
+
+def get_cluster_internal(cluster_id, session=None, **kwargs):
+ """Helper function to get cluster.
+
+    Should only be used by other files under db/api.
+ """
+ return _get_cluster(cluster_id, session=session, **kwargs)
+
+
+def _get_cluster_host(
+ cluster_id, host_id, session=None, **kwargs
+):
+ """Get clusterhost by cluster id and host id."""
+ cluster = _get_cluster(cluster_id, session=session, **kwargs)
+ from compass.db.api import host as host_api
+ host = host_api.get_host_internal(host_id, session=session, **kwargs)
+ return utils.get_db_object(
+ session, models.ClusterHost,
+ cluster_id=cluster.id,
+ host_id=host.id,
+ **kwargs
+ )
+
+
+def _get_clusterhost(clusterhost_id, session=None, **kwargs):
+ """Get clusterhost by clusterhost id."""
+ if isinstance(clusterhost_id, (int, long)):
+ return utils.get_db_object(
+ session, models.ClusterHost,
+ clusterhost_id=clusterhost_id,
+ **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'clusterhost id %s type is not int compatible' % clusterhost_id
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_cluster(
+ cluster_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get cluster info."""
+ return _get_cluster(
+ cluster_id,
+ session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERS)
+def is_cluster_os_ready(
+ cluster_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ cluster = utils.get_db_object(
+ session, models.Cluster, exception_when_missing, id=cluster_id)
+
+    all_states = [i.host.state.ready for i in cluster.clusterhosts]
+
+    logging.info("is_cluster_os_ready: all_states %s", all_states)
+
+ return all(all_states)
+
+
+def check_cluster_validated(cluster):
+ """Check cluster is validated."""
+ if not cluster.config_validated:
+ raise exception.Forbidden(
+ 'cluster %s is not validated' % cluster.name
+ )
+
+
+def check_clusterhost_validated(clusterhost):
+ """Check clusterhost is validated."""
+ if not clusterhost.config_validated:
+ raise exception.Forbidden(
+ 'clusterhost %s is not validated' % clusterhost.name
+ )
+
+
+def check_cluster_editable(
+ cluster, user=None,
+ check_in_installing=False
+):
+ """Check if cluster is editable.
+
+    Set check_in_installing to True when updating the cluster
+    reinstall_distributed_system attribute, or for any check that
+    only needs to ensure the cluster is not in the installing state.
+    Otherwise the check ensures the cluster is not deploying or
+    deployed.
+    If the user is neither an admin nor the owner of the cluster,
+    the check fails so that the user cannot update the cluster
+    attributes.
+ """
+ if check_in_installing:
+ if cluster.state.state == 'INSTALLING':
+ raise exception.Forbidden(
+ 'cluster %s is not editable '
+ 'when state is installing' % cluster.name
+ )
+# elif (
+# cluster.flavor_name and
+# not cluster.reinstall_distributed_system
+# ):
+# raise exception.Forbidden(
+# 'cluster %s is not editable '
+# 'when not to be reinstalled' % cluster.name
+# )
+ if user and not user.is_admin and cluster.creator_id != user.id:
+ raise exception.Forbidden(
+ 'cluster %s is not editable '
+ 'when user is not admin or cluster owner' % cluster.name
+ )
+
+
+def is_cluster_editable(
+ cluster, user=None,
+ check_in_installing=False
+):
+ """Get if cluster is editble."""
+ try:
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=check_in_installing
+ )
+ return True
+ except exception.Forbidden:
+ return False
+
+
+@utils.supported_filters(
+ ADDED_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_cluster(
+ exception_when_existing=True,
+ name=None, adapter_id=None, flavor_id=None,
+ user=None, session=None, **kwargs
+):
+ """Create a cluster."""
+ adapter = adapter_api.get_adapter(
+ adapter_id, user=user, session=session
+ )
+ # if flavor_id is not None, also set flavor field.
+ # In future maybe we can move the use of flavor from
+ # models.py to db/api and explictly get flavor when
+ # needed instead of setting flavor into cluster record.
+ flavor = {}
+ if flavor_id:
+ flavor = adapter_api.get_flavor(
+ flavor_id,
+ user=user, session=session
+ )
+ if flavor['adapter_id'] != adapter['id']:
+ raise exception.InvalidParameter(
+ 'flavor %s is not of adapter %s' % (
+ flavor_id, adapter_id
+ )
+ )
+
+ cluster = utils.add_db_object(
+ session, models.Cluster, exception_when_existing,
+ name, user.id, adapter_id=adapter_id,
+ flavor_id=flavor_id, flavor=flavor, **kwargs
+ )
+ return cluster
+
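+# A hypothetical call (identifiers are illustrative; a session is
+# opened by the run_in_session decorator):
+#   add_cluster(name='cluster1', adapter_id='openstack',
+#               os_id='CentOS-7', flavor_id='openstack:HA',
+#               user=admin_user)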
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_cluster(cluster_id, user=None, session=None, **kwargs):
+ """Update a cluster."""
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=(
+ kwargs.get('reinstall_distributed_system', False)
+ )
+ )
+ return utils.update_db_object(session, cluster, **kwargs)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_FIELDS + ['status', 'cluster', 'hosts'],
+ cluster=RESP_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def del_cluster(
+ cluster_id, force=False, from_database_only=False,
+ delete_underlying_host=False, user=None, session=None, **kwargs
+):
+ """Delete a cluster.
+
+    If force, the cluster will be deleted anyway. It is used by the cli to
+    force clean a cluster in any case.
+    If from_database_only, the cluster record will only be removed from
+    the database. Otherwise, a del task is sent to celery to do clean deletion.
+ If delete_underlying_host, all hosts under this cluster will also be
+ deleted.
+ The backend will call del_cluster again with from_database_only set
+ when it has done the deletion work on os installer/package installer.
+ """
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ logging.debug(
+ 'delete cluster %s with force=%s '
+ 'from_database_only=%s delete_underlying_host=%s',
+ cluster.id, force, from_database_only, delete_underlying_host
+ )
+    # Force the cluster state and the state of any clusterhost in the
+    # cluster to ERROR when we want to delete the cluster anyway,
+    # even if the cluster is installing or already installed.
+    # It lets the api know the deletion is in progress while the
+    # backend is doing the real deletion.
+    # In future we may introduce a new state like INDELETE to indicate
+    # that the deletion is in progress.
+    # We still need to discuss whether a cluster that is already
+    # installed can be deleted through the api.
+ for clusterhost in cluster.clusterhosts:
+ if clusterhost.state.state != 'UNINITIALIZED' and force:
+ clusterhost.state.state = 'ERROR'
+ if delete_underlying_host:
+ host = clusterhost.host
+ if host.state.state != 'UNINITIALIZED' and force:
+ host.state.state = 'ERROR'
+ if cluster.state.state != 'UNINITIALIZED' and force:
+ cluster.state.state = 'ERROR'
+
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+
+ # delete underlying host if delete_underlying_host is set.
+ if delete_underlying_host:
+ for clusterhost in cluster.clusterhosts:
+            # delete the underlying host only if the user has permission.
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ if host_api.is_host_editable(
+ host, user=user, check_in_installing=True
+ ):
+                # Delete the host record directly in the database when
+                # there is no need to do the deletion in the backend or
+                # from_database_only is set.
+ if host.state.state == 'UNINITIALIZED' or from_database_only:
+ utils.del_db_object(
+ session, host
+ )
+
+    # Delete the cluster record directly in the database when there
+    # is no need to do the deletion in the backend or from_database_only
+    # is set.
+ if cluster.state.state == 'UNINITIALIZED' or from_database_only:
+ return utils.del_db_object(
+ session, cluster
+ )
+ else:
+ from compass.tasks import client as celery_client
+ logging.info('send del cluster %s task to celery', cluster_id)
+ celery_client.celery.send_task(
+ 'compass.tasks.delete_cluster',
+ (
+ user.email, cluster.id,
+ [
+ clusterhost.host_id
+ for clusterhost in cluster.clusterhosts
+ ],
+ delete_underlying_host
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'delete action is sent',
+ 'cluster': cluster,
+ 'hosts': cluster.clusterhosts
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def get_cluster_config(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster config."""
+ return _get_cluster(cluster_id, session=session)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
+def get_cluster_deployed_config(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster deployed config."""
+ return _get_cluster(cluster_id, session=session)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_cluster_metadata(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster metadata.
+
+    If the cluster has no flavor, it is an os-only cluster and we
+    ignore its package metadata.
+ """
+ cluster = _get_cluster(cluster_id, session=session)
+ metadatas = {}
+ os_name = cluster.os_name
+ if os_name:
+ metadatas.update(
+ metadata_api.get_os_metadata(
+ os_name, session=session
+ )
+ )
+ flavor_id = cluster.flavor_id
+ if flavor_id:
+ metadatas.update(
+ metadata_api.get_flavor_metadata(
+ flavor_id,
+ user=user, session=session
+ )
+ )
+
+ return metadatas
+
+
+def _cluster_os_config_validates(
+ config, cluster, session=None, user=None, **kwargs
+):
+ """Check cluster os config validation."""
+ metadata_api.validate_os_config(
+ config, cluster.os_id
+ )
+
+
+def _cluster_package_config_validates(
+ config, cluster, session=None, user=None, **kwargs
+):
+ """Check cluster package config validation."""
+ metadata_api.validate_flavor_config(
+ config, cluster.flavor_id
+ )
+
+
+@utils.input_validates_with_args(
+ put_os_config=_cluster_os_config_validates,
+ put_package_config=_cluster_package_config_validates
+)
+@utils.output_validates_with_args(
+ os_config=_cluster_os_config_validates,
+ package_config=_cluster_package_config_validates
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def _update_cluster_config(cluster, session=None, user=None, **kwargs):
+ """Update a cluster config."""
+ check_cluster_editable(cluster, user=user)
+ return utils.update_db_object(
+ session, cluster, **kwargs
+ )
+
+
+# replace os_config to deployed_os_config,
+# package_config to deployed_package_config
+@utils.replace_filters(
+ os_config='deployed_os_config',
+ package_config='deployed_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_DEPLOYED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
+def update_cluster_deployed_config(
+ cluster_id, user=None, session=None, **kwargs
+):
+ """Update cluster deployed config."""
+ cluster = _get_cluster(cluster_id, session=session)
+ check_cluster_editable(cluster, user=user)
+ check_cluster_validated(cluster)
+ return utils.update_db_object(
+ session, cluster, **kwargs
+ )
+
+
+# Replace os_config with put_os_config and
+# package_config with put_package_config in kwargs.
+# It tells the db these fields will be updated, not patched.
+@utils.replace_filters(
+ os_config='put_os_config',
+ package_config='put_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER_CONFIG
+)
+def update_cluster_config(cluster_id, user=None, session=None, **kwargs):
+ """Update cluster config."""
+ cluster = _get_cluster(cluster_id, session=session)
+ return _update_cluster_config(
+ cluster, session=session, user=user, **kwargs
+ )
+
+
+# Replace os_config with patched_os_config and
+# package_config with patched_package_config in kwargs.
+# It tells the db these fields will be patched, not updated.
+@utils.replace_filters(
+ os_config='patched_os_config',
+ package_config='patched_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER_CONFIG
+)
+def patch_cluster_config(cluster_id, user=None, session=None, **kwargs):
+ """patch cluster config."""
+ cluster = _get_cluster(cluster_id, session=session)
+ return _update_cluster_config(
+ cluster, session=session, user=user, **kwargs
+ )
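+
+# Example (illustrative sketch): update replaces the config fields while
+# patch deep-merges into them; the ids, user and values are assumed.
+#
+#     update_cluster_config(1, user=u, os_config={'general': {...}})
+#     # os_config is now exactly the given dict.
+#     patch_cluster_config(1, user=u, os_config={'general': {...}})
+#     # os_config is merged with the existing value.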
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def del_cluster_config(cluster_id, user=None, session=None):
+ """Delete a cluster config."""
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ check_cluster_editable(cluster, user=user)
+ return utils.update_db_object(
+ session, cluster, os_config={},
+ package_config={}, config_validated=False
+ )
+
+
+def _roles_validates(roles, cluster, session=None, user=None):
+ """Check roles is validated to a cluster's roles."""
+ if roles:
+ if not cluster.flavor_name:
+ raise exception.InvalidParameter(
+ 'not flavor in cluster %s' % cluster.name
+ )
+ cluster_roles = [role['name'] for role in cluster.flavor['roles']]
+ for role in roles:
+ if role not in cluster_roles:
+ raise exception.InvalidParameter(
+ 'role %s is not in cluster roles %s' % (
+ role, cluster_roles
+ )
+ )
+
+
+def _cluster_host_roles_validates(
+ value, cluster, host, session=None, user=None, **kwargs
+):
+ """Check clusterhost roles is validated by cluster and host."""
+ _roles_validates(value, cluster, session=session, user=user)
+
+
+def _clusterhost_roles_validates(
+ value, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost roles is validated by clusterhost."""
+ _roles_validates(
+ value, clusterhost.cluster, session=session, user=user
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_HOST_FIELDS,
+ ignore_support_keys=UPDATED_CLUSTERHOST_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+def _add_host_if_not_exist(
+ machine_id, cluster, session=None, user=None, **kwargs
+):
+ """Add underlying host if it does not exist."""
+ from compass.db.api import host as host_api
+ host = host_api.get_host_internal(
+ machine_id, session=session, exception_when_missing=False
+ )
+ if host:
+ if kwargs:
+            # ignore updating the underlying host if it is not editable.
+ if host_api.is_host_editable(
+ host, user=cluster.creator,
+ check_in_installing=kwargs.get('reinstall_os', False),
+ ):
+ utils.update_db_object(
+ session, host,
+ **kwargs
+ )
+ else:
+                logging.debug(
+                    'ignore updating host %s '
+                    'since it is not editable', host.name
+                )
+ else:
+ logging.debug('nothing to update for host %s', host.name)
+ else:
+ from compass.db.api import adapter_holder as adapter_api
+ adapter = adapter_api.get_adapter(
+ cluster.adapter_name, user=user, session=session
+ )
+ host = utils.add_db_object(
+ session, models.Host, False, machine_id,
+ os_name=cluster.os_name,
+ os_installer=adapter['os_installer'],
+ creator=cluster.creator,
+ **kwargs
+ )
+ return host
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=UPDATED_HOST_FIELDS
+)
+@utils.input_validates_with_args(
+ roles=_cluster_host_roles_validates
+)
+def _add_clusterhost_only(
+ cluster, host,
+ exception_when_existing=False,
+ session=None, user=None,
+ **kwargs
+):
+ """Get clusterhost only."""
+ if not cluster.state.state == "UNINITIALIZED":
+ cluster.state.ready = False
+ cluster.state.state = "UNINITIALIZED"
+ cluster.state.percentage = 0.0
+ utils.update_db_object(session, cluster.state, state="UNINITIALIZED")
+
+ return utils.add_db_object(
+ session, models.ClusterHost, exception_when_existing,
+ cluster.id, host.id, **kwargs
+ )
+
+
+@utils.supported_filters(
+ ADDED_HOST_FIELDS,
+ optional_support_keys=UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+def _add_clusterhost(
+ cluster,
+ exception_when_existing=False,
+ session=None, user=None, machine_id=None, **kwargs
+):
+ """Add clusterhost and add underlying host if it does not exist."""
+ host = _add_host_if_not_exist(
+ machine_id, cluster, session=session,
+ user=user, **kwargs
+ )
+
+ return _add_clusterhost_only(
+ cluster, host, exception_when_existing=exception_when_existing,
+ session=session, user=user, **kwargs
+ )
+
+
+def _add_clusterhosts(cluster, machines, session=None, user=None):
+ """Add machines to cluster.
+
+ Args:
+        machines: list of dicts, each with clusterhost attrs to update.
+
+ Examples:
+ [{'machine_id': 1, 'name': 'host1'}]
+ """
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+    if cluster.state.state == 'SUCCESSFUL':
+        cluster.state.state = 'UPDATE_PREPARING'
+ for machine_dict in machines:
+ _add_clusterhost(
+ cluster, session=session, user=user, **machine_dict
+ )
+
+
+def _remove_clusterhosts(cluster, hosts, session=None, user=None):
+ """Remove hosts from cluster.
+
+ Args:
+ hosts: list of host id.
+ """
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ utils.del_db_objects(
+ session, models.ClusterHost,
+ cluster_id=cluster.id, host_id=hosts
+ )
+
+
+def _set_clusterhosts(cluster, machines, session=None, user=None):
+ """set machines to cluster.
+
+ Args:
+ machines: list of dict which contains clusterost attr to update.
+
+ Examples:
+ [{'machine_id': 1, 'name': 'host1'}]
+ """
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ utils.del_db_objects(
+ session, models.ClusterHost,
+ cluster_id=cluster.id
+ )
+ if cluster.state.state == 'SUCCESSFUL':
+ cluster.state.state = 'UPDATE_PREPARING'
+ for machine_dict in machines:
+ _add_clusterhost(
+ cluster, True, session=session, user=user, **machine_dict
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def list_cluster_hosts(cluster_id, user=None, session=None, **filters):
+ """List clusterhosts of a cluster."""
+ cluster = _get_cluster(cluster_id, session=session)
+ return utils.list_db_objects(
+ session, models.ClusterHost, cluster_id=cluster.id,
+ **filters
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def list_clusterhosts(user=None, session=None, **filters):
+ """List all clusterhosts."""
+ return utils.list_db_objects(
+ session, models.ClusterHost, **filters
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def get_cluster_host(
+ cluster_id, host_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get clusterhost info by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session,
+ exception_when_missing=exception_when_missing,
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def get_clusterhost(
+ clusterhost_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get clusterhost info by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session,
+ exception_when_missing=exception_when_missing,
+ user=user
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def add_cluster_host(
+ cluster_id, exception_when_existing=True,
+ user=None, session=None, **kwargs
+):
+ """Add a host to a cluster."""
+ cluster = _get_cluster(cluster_id, session=session)
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ if cluster.state.state == 'SUCCESSFUL':
+ cluster.state.state = 'UPDATE_PREPARING'
+ return _add_clusterhost(
+ cluster, exception_when_existing,
+ session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_HOST_FIELDS,
+ ignore_support_keys=(
+ UPDATED_CLUSTERHOST_FIELDS +
+ PATCHED_CLUSTERHOST_FIELDS
+ )
+)
+def _update_host_if_necessary(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """Update underlying host if there is something to update."""
+ host = clusterhost.host
+ if kwargs:
+ # ignore update underlying host if the host is not editable.
+ from compass.db.api import host as host_api
+ if host_api.is_host_editable(
+ host, user=clusterhost.cluster.creator,
+ check_in_installing=kwargs.get('reinstall_os', False),
+ ):
+ utils.update_db_object(
+ session, host,
+ **kwargs
+ )
+ else:
+ logging.debug(
+ 'ignore update host %s since it is not editable' % host.name
+ )
+ else:
+ logging.debug(
+ 'nothing to update for host %s', host.name
+ )
+ return host
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_CLUSTERHOST_FIELDS +
+ PATCHED_CLUSTERHOST_FIELDS
+ ),
+ ignore_support_keys=UPDATED_HOST_FIELDS
+)
+@utils.input_validates_with_args(
+ roles=_clusterhost_roles_validates,
+ patched_roles=_clusterhost_roles_validates
+)
+def _update_clusterhost_only(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """Update clusterhost only."""
+ check_cluster_editable(clusterhost.cluster, user=user)
+ return utils.update_db_object(
+ session, clusterhost, **kwargs
+ )
+
+
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def _update_clusterhost(clusterhost, session=None, user=None, **kwargs):
+ """Update clusterhost and underlying host if necessary."""
+ _update_host_if_necessary(
+ clusterhost, session=session, user=user, **kwargs
+ )
+ return _update_clusterhost_only(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def update_cluster_host(
+ cluster_id, host_id, user=None,
+ session=None, **kwargs
+):
+ """Update clusterhost by cluster id and host id."""
+ logging.info('updating kwargs: %s', kwargs)
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def update_clusterhost(
+ clusterhost_id, user=None,
+ session=None, **kwargs
+):
+ """Update clusterhost by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace roles with patched_roles in kwargs.
+# It tells the db the roles field will be patched.
+@utils.replace_filters(
+ roles='patched_roles'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def patch_cluster_host(
+ cluster_id, host_id, user=None,
+ session=None, **kwargs
+):
+ """Patch clusterhost by cluster id and host id."""
+ logging.info("kwargs are %s", kwargs)
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ updated_clusterhost = _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+ return updated_clusterhost
+
+
+# Replace roles with patched_roles in kwargs.
+# It tells the db the roles field will be patched.
+@utils.replace_filters(
+ roles='patched_roles'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def patch_clusterhost(
+ clusterhost_id, user=None, session=None,
+ **kwargs
+):
+ """Patch clusterhost by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTER_HOST
+)
+@utils.wrap_to_dict(
+ RESP_CLUSTERHOST_FIELDS + ['status', 'host'],
+ host=RESP_CLUSTERHOST_FIELDS
+)
+def _del_cluster_host(
+ clusterhost,
+ force=False, from_database_only=False,
+ delete_underlying_host=False, user=None,
+ session=None, **kwargs
+):
+ """delete clusterhost.
+
+ If force, the cluster host will be deleted anyway.
+ If from_database_only, the cluster host recored will only be
+ deleted from database. Otherwise a celery task sent to do
+ clean deletion.
+ If delete_underlying_host, the underlying host will also be deleted.
+ The backend will call _del_cluster_host again when the clusterhost is
+ deleted from os installer/package installer with from_database_only
+ set.
+ """
+ # force set clusterhost state to ERROR when we want to delete the
+ # clusterhost anyway even the clusterhost is in installing or already
+ # installed. It let the api know the deleting is in doing when backend
+ # is doing the real deleting. In future we may import a new state like
+ # INDELETE to indicate the deleting is processing.
+ # We need discuss about if we can delete a clusterhost when it is already
+ # installed by api.
+ if clusterhost.state.state != 'UNINITIALIZED' and force:
+ clusterhost.state.state = 'ERROR'
+ if not force:
+ check_cluster_editable(
+ clusterhost.cluster, user=user,
+ check_in_installing=True
+ )
+ # delete underlying host if delete_underlying_host is set.
+ if delete_underlying_host:
+ host = clusterhost.host
+ if host.state.state != 'UNINITIALIZED' and force:
+ host.state.state = 'ERROR'
+        # only delete the host when the user has permission to delete it.
+ import compass.db.api.host as host_api
+ if host_api.is_host_editable(
+ host, user=user,
+ check_in_installing=True
+ ):
+ # if there is no need to do the deletion by backend or
+ # from_database_only is set, we only delete the record
+ # in database.
+ if host.state.state == 'UNINITIALIZED' or from_database_only:
+ utils.del_db_object(
+ session, host
+ )
+
+ # if there is no need to do the deletion by backend or
+ # from_database_only is set, we only delete the record in database.
+ if clusterhost.state.state == 'UNINITIALIZED' or from_database_only:
+ return utils.del_db_object(
+ session, clusterhost
+ )
+ else:
+ logging.info(
+ 'send del cluster %s host %s task to celery',
+ clusterhost.cluster_id, clusterhost.host_id
+ )
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.delete_cluster_host',
+ (
+ user.email, clusterhost.cluster_id, clusterhost.host_id,
+ delete_underlying_host
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'delete action sent',
+ 'host': clusterhost,
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+def del_cluster_host(
+ cluster_id, host_id,
+ force=False, from_database_only=False,
+ delete_underlying_host=False, user=None,
+ session=None, **kwargs
+):
+ """Delete clusterhost by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _del_cluster_host(
+ clusterhost, force=force, from_database_only=from_database_only,
+ delete_underlying_host=delete_underlying_host, user=user,
+ session=session, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+def del_clusterhost(
+ clusterhost_id,
+ force=False, from_database_only=False,
+ delete_underlying_host=False, user=None,
+ session=None, **kwargs
+):
+ """Delete clusterhost by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _del_cluster_host(
+ clusterhost, force=force, from_database_only=from_database_only,
+ delete_underlying_host=delete_underlying_host, user=user,
+ session=session, **kwargs
+ )
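+
+# Example (illustrative sketch): the deletion modes exposed above; the
+# ids and user are assumed.
+#
+#     # clean deletion through the backend (celery task):
+#     del_cluster_host(1, 2, user=u)
+#     # drop only the database record, skipping the backend:
+#     del_cluster_host(1, 2, from_database_only=True, user=u)
+#     # delete even when installing/installed, marking the state ERROR:
+#     del_cluster_host(1, 2, force=True, user=u)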
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def get_cluster_host_config(
+ cluster_id, host_id, user=None,
+ session=None, **kwargs
+):
+ """Get clusterhost config by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
+def get_cluster_host_deployed_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost deployed config by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def get_clusterhost_config(clusterhost_id, user=None, session=None, **kwargs):
+ """Get clusterhost config by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
+def get_clusterhost_deployed_config(
+ clusterhost_id, user=None,
+ session=None, **kwargs
+):
+ """Get clusterhost deployed config by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ )
+
+
+def _clusterhost_os_config_validates(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Validate clusterhost's underlying host os config."""
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ host_api.check_host_editable(host, user=user)
+ metadata_api.validate_os_config(
+ config, host.os_id
+ )
+
+
+def _clusterhost_package_config_validates(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Validate clusterhost's cluster package config."""
+ cluster = clusterhost.cluster
+ check_cluster_editable(cluster, user=user)
+ metadata_api.validate_flavor_config(
+ config, cluster.flavor_id
+ )
+
+
+def _filter_clusterhost_host_editable(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Filter fields if the underlying host is not editable."""
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ return host_api.is_host_editable(host, user=user)
+
+
+@utils.input_filters(
+ put_os_config=_filter_clusterhost_host_editable,
+ patched_os_config=_filter_clusterhost_host_editable
+)
+@utils.input_validates_with_args(
+ put_os_config=_clusterhost_os_config_validates,
+ put_package_config=_clusterhost_package_config_validates
+)
+@utils.output_validates_with_args(
+ os_config=_clusterhost_os_config_validates,
+ package_config=_clusterhost_package_config_validates
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def _update_clusterhost_config(clusterhost, session=None, user=None, **kwargs):
+ """Update clusterhost config."""
+ return utils.update_db_object(
+ session, clusterhost, **kwargs
+ )
+
+
+def _clusterhost_host_validated(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost's underlying host is validated."""
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ host_api.check_host_editable(host, user=user)
+ host_api.check_host_validated(host)
+
+
+def _clusterhost_cluster_validated(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost's cluster is validated."""
+ cluster = clusterhost.cluster
+ check_cluster_editable(cluster, user=user)
+ check_clusterhost_validated(clusterhost)
+
+
+@utils.input_filters(
+ deployed_os_config=_filter_clusterhost_host_editable,
+)
+@utils.input_validates_with_args(
+ deployed_os_config=_clusterhost_host_validated,
+ deployed_package_config=_clusterhost_cluster_validated
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
+def _update_clusterhost_deployed_config(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """Update clusterhost deployed config."""
+ return utils.update_db_object(
+ session, clusterhost, **kwargs
+ )
+
+
+# Replace os_config with put_os_config and
+# package_config with put_package_config in kwargs.
+# It tells the db these fields will be updated, not patched.
+@utils.replace_filters(
+ os_config='put_os_config',
+ package_config='put_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_cluster_host_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, user=user, session=session, **kwargs
+ )
+
+
+# Replace os_config with deployed_os_config and
+# package_config with deployed_package_config in kwargs.
+@utils.replace_filters(
+ os_config='deployed_os_config',
+ package_config='deployed_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_cluster_host_deployed_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost deployed config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_deployed_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace os_config with put_os_config and
+# package_config with put_package_config in kwargs.
+# It tells the db these fields will be updated, not patched.
+@utils.replace_filters(
+ os_config='put_os_config',
+ package_config='put_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_clusterhost_config(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace os_config with deployed_os_config and
+# package_config with deployed_package_config in kwargs.
+@utils.replace_filters(
+ os_config='deployed_os_config',
+ package_config='deployed_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_clusterhost_deployed_config(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost deployed config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost_deployed_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace os_config with patched_os_config and
+# package_config with patched_package_config in kwargs.
+# It tells the db these fields will be patched, not updated.
+@utils.replace_filters(
+ os_config='patched_os_config',
+ package_config='patched_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def patch_cluster_host_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """patch clusterhost config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace os_config with patched_os_config and
+# package_config with patched_package_config in kwargs.
+# It tells the db these fields will be patched, not updated.
+@utils.replace_filters(
+ os_config='patched_os_config',
+ package_config='patched_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def patch_clusterhost_config(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """patch clusterhost config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+def _clusterhost_host_editable(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost underlying host is editable."""
+ from compass.db.api import host as host_api
+ host_api.check_host_editable(clusterhost.host, user=user)
+
+
+def _clusterhost_cluster_editable(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost's cluster is editable."""
+ check_cluster_editable(clusterhost.cluster, user=user)
+
+
+@utils.supported_filters(
+ optional_support_keys=['os_config', 'package_config']
+)
+@utils.input_filters(
+ os_config=_filter_clusterhost_host_editable,
+)
+@utils.output_validates_with_args(
+ package_config=_clusterhost_cluster_editable
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def _delete_clusterhost_config(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """delete clusterhost config."""
+ return utils.update_db_object(
+ session, clusterhost, config_validated=False,
+ **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
+)
+def delete_cluster_host_config(
+ cluster_id, host_id, user=None, session=None
+):
+ """Delete a clusterhost config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _delete_clusterhost_config(
+ clusterhost, session=session, user=user,
+ os_config={}, package_config={}
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def delete_clusterhost_config(clusterhost_id, user=None, session=None):
+ """Delet a clusterhost config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _delete_clusterhost_config(
+ clusterhost, session=session, user=user,
+ os_config={}, package_config={}
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=['add_hosts', 'remove_hosts', 'set_hosts']
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+@utils.wrap_to_dict(
+ ['hosts'],
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def update_cluster_hosts(
+ cluster_id, add_hosts={}, set_hosts=None,
+ remove_hosts={}, user=None, session=None
+):
+ """Update cluster hosts."""
+ cluster = _get_cluster(cluster_id, session=session)
+ if remove_hosts:
+ _remove_clusterhosts(
+ cluster, session=session, user=user, **remove_hosts
+ )
+ if add_hosts:
+ _add_clusterhosts(
+ cluster, session=session, user=user, **add_hosts
+ )
+ if set_hosts is not None:
+ _set_clusterhosts(
+ cluster, session=session, user=user, **set_hosts
+ )
+
+ return {
+ 'hosts': list_cluster_hosts(cluster_id, session=session)
+ }
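+
+# Example (illustrative sketch): add_hosts/remove_hosts/set_hosts are
+# nested dicts whose keys match _add_clusterhosts, _remove_clusterhosts
+# and _set_clusterhosts; the ids and user are assumed.
+#
+#     update_cluster_hosts(
+#         1,
+#         add_hosts={'machines': [{'machine_id': 5, 'name': 'node-5'}]},
+#         remove_hosts={'hosts': [3]},
+#         user=u
+#     )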
+
+
+def validate_clusterhost(clusterhost, session=None):
+ """validate clusterhost."""
+ roles = clusterhost.roles
+ if not roles:
+ if clusterhost.cluster.flavor_name:
+ raise exception.InvalidParameter(
+ 'empty roles for clusterhost %s' % clusterhost.name
+ )
+
+
+def validate_cluster(cluster, session=None):
+ """Validate cluster."""
+ if not cluster.clusterhosts:
+ raise exception.InvalidParameter(
+ 'cluster %s does not have any hosts' % cluster.name
+ )
+ if cluster.flavor_name:
+ cluster_roles = cluster.flavor['roles']
+ else:
+ cluster_roles = []
+ necessary_roles = set([
+ role['name'] for role in cluster_roles if not role.get('optional')
+ ])
+ clusterhost_roles = set([])
+ interface_subnets = {}
+ for clusterhost in cluster.clusterhosts:
+ roles = clusterhost.roles
+ for role in roles:
+ clusterhost_roles.add(role['name'])
+ host = clusterhost.host
+ for host_network in host.host_networks:
+ interface_subnets.setdefault(
+ host_network.interface, set([])
+ ).add(host_network.subnet.subnet)
+ missing_roles = necessary_roles - clusterhost_roles
+ if missing_roles:
+ raise exception.InvalidParameter(
+            'cluster %s has roles %s not assigned to any host' % (
+ cluster.name, list(missing_roles)
+ )
+ )
+ for interface, subnets in interface_subnets.items():
+ if len(subnets) > 1:
+ raise exception.InvalidParameter(
+                'cluster %s has multiple subnets %s on interface %s' % (
+ cluster.name, list(subnets), interface
+ )
+ )
+
+
+@utils.supported_filters(optional_support_keys=['review'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_REVIEW_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_REVIEW_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_CONFIG_FIELDS
+)
+def review_cluster(cluster_id, review={}, user=None, session=None, **kwargs):
+ """review cluster.
+
+ Args:
+ cluster_id: the cluster id.
+ review: dict contains hosts to be reviewed. either contains key
+ hosts or clusterhosts. where hosts is a list of host id,
+ clusterhosts is a list of clusterhost id.
+ """
+ from compass.db.api import host as host_api
+ cluster = _get_cluster(cluster_id, session=session)
+ check_cluster_editable(cluster, user=user)
+ host_ids = review.get('hosts', [])
+ clusterhost_ids = review.get('clusterhosts', [])
+ clusterhosts = []
+ # Get clusterhosts need to be reviewed.
+ for clusterhost in cluster.clusterhosts:
+ if (
+ clusterhost.clusterhost_id in clusterhost_ids or
+ clusterhost.host_id in host_ids
+ ):
+ clusterhosts.append(clusterhost)
+
+ os_config = copy.deepcopy(cluster.os_config)
+ os_config = metadata_api.autofill_os_config(
+ os_config, cluster.os_id, cluster=cluster
+ )
+ metadata_api.validate_os_config(
+ os_config, cluster.os_id, True
+ )
+ for clusterhost in clusterhosts:
+ host = clusterhost.host
+ # ignore underlying host os config validation
+ # since the host is not editable
+ if not host_api.is_host_editable(
+ host, user=user, check_in_installing=False
+ ):
+ logging.info(
+ 'ignore update host %s config '
+ 'since it is not editable' % host.name
+ )
+ continue
+ host_os_config = copy.deepcopy(host.os_config)
+ host_os_config = metadata_api.autofill_os_config(
+ host_os_config, host.os_id,
+ host=host
+ )
+ deployed_os_config = util.merge_dict(
+ os_config, host_os_config
+ )
+ metadata_api.validate_os_config(
+ deployed_os_config, host.os_id, True
+ )
+ host_api.validate_host(host)
+ utils.update_db_object(
+ session, host, os_config=host_os_config, config_validated=True
+ )
+
+ package_config = copy.deepcopy(cluster.package_config)
+ if cluster.flavor_name:
+ package_config = metadata_api.autofill_flavor_config(
+ package_config, cluster.flavor_id,
+ cluster=cluster
+ )
+ metadata_api.validate_flavor_config(
+ package_config, cluster.flavor_id, True
+ )
+ for clusterhost in clusterhosts:
+ clusterhost_package_config = copy.deepcopy(
+ clusterhost.package_config
+ )
+ clusterhost_package_config = (
+ metadata_api.autofill_flavor_config(
+ clusterhost_package_config,
+ cluster.flavor_id,
+ clusterhost=clusterhost
+ )
+ )
+ deployed_package_config = util.merge_dict(
+ package_config, clusterhost_package_config
+ )
+ metadata_api.validate_flavor_config(
+ deployed_package_config,
+ cluster.flavor_id, True
+ )
+ validate_clusterhost(clusterhost, session=session)
+ utils.update_db_object(
+ session, clusterhost,
+ package_config=clusterhost_package_config,
+ config_validated=True
+ )
+
+ validate_cluster(cluster, session=session)
+ utils.update_db_object(
+ session, cluster, os_config=os_config, package_config=package_config,
+ config_validated=True
+ )
+ return {
+ 'cluster': cluster,
+ 'hosts': clusterhosts
+ }
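+
+# Example (illustrative sketch): review the configs of two hosts of a
+# cluster before deployment; the ids and user are assumed.
+#
+#     result = review_cluster(1, review={'hosts': [2, 3]}, user=u)
+#     # result['cluster'] and result['hosts'] carry the autofilled and
+#     # validated configs that would be deployed.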
+
+
+@utils.supported_filters(optional_support_keys=['deploy'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def deploy_cluster(
+ cluster_id, deploy={}, user=None, session=None, **kwargs
+):
+ """deploy cluster.
+
+ Args:
+ cluster_id: cluster id.
+ deploy: dict contains key either hosts or clusterhosts.
+ deploy['hosts'] is a list of host id,
+ deploy['clusterhosts'] is a list of clusterhost id.
+ """
+ from compass.db.api import host as host_api
+ from compass.tasks import client as celery_client
+ cluster = _get_cluster(cluster_id, session=session)
+ host_ids = deploy.get('hosts', [])
+ clusterhost_ids = deploy.get('clusterhosts', [])
+ clusterhosts = []
+ # get clusterhost to deploy.
+ for clusterhost in cluster.clusterhosts:
+ if (
+ clusterhost.clusterhost_id in clusterhost_ids or
+ clusterhost.host_id in host_ids
+ ):
+ clusterhosts.append(clusterhost)
+ check_cluster_editable(cluster, user=user)
+ check_cluster_validated(cluster)
+ utils.update_db_object(session, cluster.state, state='INITIALIZED')
+ for clusterhost in clusterhosts:
+ host = clusterhost.host
+ # ignore checking if underlying host is validated if
+ # the host is not editable.
+ if host_api.is_host_editable(host, user=user):
+ host_api.check_host_validated(host)
+ utils.update_db_object(session, host.state, state='INITIALIZED')
+ if cluster.flavor_name:
+ check_clusterhost_validated(clusterhost)
+ utils.update_db_object(
+ session, clusterhost.state, state='INITIALIZED'
+ )
+
+ celery_client.celery.send_task(
+ 'compass.tasks.deploy_cluster',
+ (
+ user.email, cluster_id,
+ [clusterhost.host_id for clusterhost in clusterhosts]
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'deploy action sent',
+ 'cluster': cluster,
+ 'hosts': clusterhosts
+ }
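+
+# Example (illustrative sketch): a typical flow is to review first and
+# then deploy the same hosts; the ids and user are assumed.
+#
+#     review_cluster(1, review={'hosts': [2, 3]}, user=u)
+#     deploy_cluster(1, deploy={'hosts': [2, 3]}, user=u)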
+
+
+@utils.supported_filters(optional_support_keys=['redeploy'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def redeploy_cluster(
+ cluster_id, deploy={}, user=None, session=None, **kwargs
+):
+ """redeploy cluster.
+
+ Args:
+ cluster_id: cluster id.
+ """
+ from compass.db.api import host as host_api
+ from compass.tasks import client as celery_client
+ cluster = _get_cluster(cluster_id, session=session)
+
+ check_cluster_editable(cluster, user=user)
+ check_cluster_validated(cluster)
+ utils.update_db_object(
+ session, cluster.state,
+ state='INITIALIZED',
+ percentage=0,
+ ready=False
+ )
+ for clusterhost in cluster.clusterhosts:
+ host = clusterhost.host
+ # ignore checking if underlying host is validated if
+ # the host is not editable.
+ host_api.check_host_validated(host)
+ utils.update_db_object(
+ session, host.state,
+ state='INITIALIZED',
+ percentage=0,
+ ready=False
+ )
+ if cluster.flavor_name:
+ check_clusterhost_validated(clusterhost)
+ utils.update_db_object(
+ session,
+ clusterhost.state,
+ state='INITIALIZED',
+ percentage=0,
+ ready=False
+ )
+
+ celery_client.celery.send_task(
+ 'compass.tasks.redeploy_cluster',
+ (
+ user.email, cluster_id
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'redeploy action sent',
+ 'cluster': cluster
+ }
+
+
+@utils.supported_filters(optional_support_keys=['apply_patch'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def patch_cluster(cluster_id, user=None, session=None, **kwargs):
+    """Send a celery task to apply patches to the cluster."""
+ from compass.tasks import client as celery_client
+
+ cluster = _get_cluster(cluster_id, session=session)
+ celery_client.celery.send_task(
+ 'compass.tasks.patch_cluster',
+ (
+ user.email, cluster_id,
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'patch action sent',
+ 'cluster': cluster
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTER_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def get_cluster_state(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster state info."""
+ return _get_cluster(cluster_id, session=session).state_dict()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_cluster_host_state(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost state merged with underlying host state."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ ).state_dict()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_cluster_host_self_state(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost itself state."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ ).state
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_clusterhost_state(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost state merged with underlying host state."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ ).state_dict()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_clusterhost_self_state(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost itself state."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ ).state
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def update_cluster_host_state(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Update a clusterhost itself state."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+    # Modify(harry): progress_update.py no longer updates the cluster
+    # state, so update the cluster state here.
+ cluster = _get_cluster(clusterhost.cluster_id, session=session)
+ utils.update_db_object(session, clusterhost.state, **kwargs)
+ utils.update_db_object(session, cluster.state, **kwargs)
+ return clusterhost.state_dict()
+
+
+def _update_clusterhost_state(
+ clusterhost, from_database_only=False,
+ session=None, user=None, **kwargs
+):
+ """Update clusterhost state.
+
+    If from_database_only, the state is only updated in the database.
+    Otherwise a task is sent to celery, and the os installer/package
+    installer will also update its state if needed.
+ """
+ if 'ready' in kwargs and kwargs['ready'] and not clusterhost.state.ready:
+ ready_triggered = True
+ else:
+ ready_triggered = False
+ cluster_ready = False
+ host = clusterhost.host
+ cluster = clusterhost.cluster
+ host_ready = not host.state.ready
+ if ready_triggered:
+ cluster_ready = True
+ for clusterhost_in_cluster in cluster.clusterhosts:
+ if (
+ clusterhost_in_cluster.clusterhost_id
+ == clusterhost.clusterhost_id
+ ):
+ continue
+ if not clusterhost_in_cluster.state.ready:
+ cluster_ready = False
+
+ logging.info(
+ 'clusterhost %s ready: %s',
+ clusterhost.name, ready_triggered
+ )
+ logging.info('cluster ready: %s', cluster_ready)
+ logging.info('host ready: %s', host_ready)
+ if not ready_triggered or from_database_only:
+ logging.info('%s state is set to %s', clusterhost.name, kwargs)
+ utils.update_db_object(session, clusterhost.state, **kwargs)
+ if not clusterhost.state.ready:
+ logging.info('%s state ready is set to False', cluster.name)
+ utils.update_db_object(session, cluster.state, ready=False)
+ status = '%s state is updated' % clusterhost.name
+ else:
+ if not user:
+ user_id = cluster.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.package_installed',
+ (
+ clusterhost.cluster_id, clusterhost.host_id,
+ cluster_ready, host_ready
+ ),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ status = '%s: cluster ready %s host ready %s' % (
+ clusterhost.name, cluster_ready, host_ready
+ )
+ logging.info('action status: %s', status)
+ return {
+ 'status': status,
+ 'clusterhost': clusterhost.state_dict()
+ }
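+
+# Example (illustrative sketch): when the last not-ready clusterhost
+# reports ready=True, the function sends a compass.tasks.package_installed
+# task instead of updating the database directly, so the installers can
+# finalize the state.
+#
+#     _update_clusterhost_state(
+#         clusterhost, session=session, user=u, ready=True
+#     )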
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(['status', 'clusterhost'])
+def update_cluster_host_state_internal(
+ cluster_id, host_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a clusterhost state by installation process."""
+ # TODO(xicheng): it should be merged into update_cluster_host_state
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_state(
+ clusterhost, from_database_only=from_database_only,
+        session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def update_clusterhost_state(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Update a clusterhost itself state."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+    # Modify(harry): progress_update.py no longer updates the cluster
+    # state, so update the cluster state here.
+ cluster = _get_cluster(clusterhost.cluster_id, session=session)
+ utils.update_db_object(session, clusterhost.state, **kwargs)
+ utils.update_db_object(session, cluster.state, **kwargs)
+ return clusterhost.state_dict()
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(['status', 'clusterhost'])
+def update_clusterhost_state_internal(
+ clusterhost_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a clusterhost state by installation process."""
+ # TODO(xicheng): it should be merged into update_clusterhost_state
+ clusterhost = _get_clusterhost(clusterhost_id, session=session)
+ return _update_clusterhost_state(
+ clusterhost, from_database_only=from_database_only,
+ session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTER_STATE_FIELDS,
+ ignore_support_keys=(IGNORE_FIELDS + IGNORE_UPDATED_CLUSTER_STATE_FIELDS)
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def update_cluster_state(
+ cluster_id, user=None, session=None, **kwargs
+):
+ """Update a cluster state."""
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ utils.update_db_object(session, cluster.state, **kwargs)
+ return cluster.state_dict()
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTER_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_STATE
+)
+@utils.wrap_to_dict(['status', 'cluster'])
+def update_cluster_state_internal(
+ cluster_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a cluster state by installation process.
+
+ If from_database_only, the state will only be updated in database.
+ Otherwise a task sent to do state update in os installer and
+ package installer.
+ """
+ # TODO(xicheng): it should be merged into update_cluster_state
+ cluster = _get_cluster(cluster_id, session=session)
+ if 'ready' in kwargs and kwargs['ready'] and not cluster.state.ready:
+ ready_triggered = True
+ else:
+ ready_triggered = False
+ clusterhost_ready = {}
+ if ready_triggered:
+ for clusterhost in cluster.clusterhosts:
+ clusterhost_ready[clusterhost.host_id] = (
+ not clusterhost.state.ready
+ )
+
+ logging.info('cluster %s ready: %s', cluster_id, ready_triggered)
+ logging.info('clusterhost ready: %s', clusterhost_ready)
+
+ if not ready_triggered or from_database_only:
+ logging.info('%s state is set to %s', cluster.name, kwargs)
+ utils.update_db_object(session, cluster.state, **kwargs)
+ if not cluster.state.ready:
+ for clusterhost in cluster.clusterhosts:
+                logging.info('%s state ready is set to False', clusterhost.name)
+ utils.update_db_object(
+ session, clusterhost.state, ready=False
+ )
+ status = '%s state is updated' % cluster.name
+ else:
+ if not user:
+ user_id = cluster.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.cluster_installed',
+            (cluster_id, clusterhost_ready),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ status = '%s installed action set clusterhost ready %s' % (
+ cluster.name, clusterhost_ready
+ )
+ logging.info('action status: %s', status)
+ return {
+ 'status': status,
+ 'cluster': cluster.state_dict()
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_cluster_host_log_histories(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost log history by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ ).log_histories
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_clusterhost_log_histories(
+ clusterhost_id, user=None,
+ session=None, **kwargs
+):
+ """Get clusterhost log history by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ ).log_histories
+
+
+def _get_cluster_host_log_history(
+ cluster_id, host_id, filename, session=None, **kwargs
+):
+ """Get clusterhost log history by cluster id, host id and filename."""
+ clusterhost = _get_cluster_host(cluster_id, host_id, session=session)
+ return utils.get_db_object(
+ session, models.ClusterHostLogHistory,
+ clusterhost_id=clusterhost.clusterhost_id, filename=filename,
+ **kwargs
+ )
+
+
+def _get_clusterhost_log_history(
+ clusterhost_id, filename, session=None, **kwargs
+):
+ """Get clusterhost log history by clusterhost id and filename."""
+ clusterhost = _get_clusterhost(clusterhost_id, session=session)
+ return utils.get_db_object(
+ session, models.ClusterHostLogHistory,
+ clusterhost_id=clusterhost.clusterhost_id, filename=filename,
+ **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_cluster_host_log_history(
+ cluster_id, host_id, filename, user=None, session=None, **kwargs
+):
+ """Get clusterhost log history by cluster id, host id and filename."""
+ return _get_cluster_host_log_history(
+ cluster_id, host_id, filename, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_clusterhost_log_history(
+ clusterhost_id, filename, user=None, session=None, **kwargs
+):
+ """Get host log history by clusterhost id and filename."""
+ return _get_clusterhost_log_history(
+ clusterhost_id, filename, session=session
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def update_cluster_host_log_history(
+ cluster_id, host_id, filename, user=None, session=None, **kwargs
+):
+ """Update a host log history by cluster id, host id and filename."""
+ cluster_host_log_history = _get_cluster_host_log_history(
+ cluster_id, host_id, filename, session=session
+ )
+ return utils.update_db_object(
+ session, cluster_host_log_history, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def update_clusterhost_log_history(
+ clusterhost_id, filename, user=None, session=None, **kwargs
+):
+ """Update a host log history by clusterhost id and filename."""
+ clusterhost_log_history = _get_clusterhost_log_history(
+ clusterhost_id, filename, session=session
+ )
+ return utils.update_db_object(session, clusterhost_log_history, **kwargs)
+
+
+@utils.supported_filters(
+ ADDED_CLUSTERHOST_LOG_FIELDS,
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def add_clusterhost_log_history(
+ clusterhost_id, exception_when_existing=False,
+ filename=None, user=None, session=None, **kwargs
+):
+ """add a host log history by clusterhost id and filename."""
+ clusterhost = _get_clusterhost(clusterhost_id, session=session)
+ return utils.add_db_object(
+ session, models.ClusterHostLogHistory,
+ exception_when_existing,
+ clusterhost.clusterhost_id, filename, **kwargs
+ )
+
+
+@utils.supported_filters(
+ ADDED_CLUSTERHOST_LOG_FIELDS,
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def add_cluster_host_log_history(
+ cluster_id, host_id, exception_when_existing=False,
+ filename=None, user=None, session=None, **kwargs
+):
+ """add a host log history by cluster id, host id and filename."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return utils.add_db_object(
+ session, models.ClusterHostLogHistory, exception_when_existing,
+ clusterhost.clusterhost_id, filename, **kwargs
+ )
diff --git a/compass-deck/db/api/database.py b/compass-deck/db/api/database.py
new file mode 100644
index 0000000..49769d7
--- /dev/null
+++ b/compass-deck/db/api/database.py
@@ -0,0 +1,264 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provider interface to manipulate database."""
+import functools
+import logging
+import netaddr
+
+from contextlib import contextmanager
+from sqlalchemy import create_engine
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.exc import OperationalError
+from sqlalchemy.orm import scoped_session
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import NullPool
+from sqlalchemy.pool import QueuePool
+from sqlalchemy.pool import SingletonThreadPool
+from sqlalchemy.pool import StaticPool
+from threading import local
+
+from compass.db import exception
+from compass.db import models
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+ENGINE = None
+SESSION = sessionmaker(autocommit=False, autoflush=False)
+SCOPED_SESSION = None
+SESSION_HOLDER = local()
+
+POOL_MAPPING = {
+ 'instant': NullPool,
+ 'static': StaticPool,
+ 'queued': QueuePool,
+ 'thread_single': SingletonThreadPool
+}
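+
+# Example (illustrative sketch): the pool class is selected by the
+# SQLALCHEMY_DATABASE_POOL_TYPE setting; the value shown is an
+# assumption.
+#
+#     SQLALCHEMY_DATABASE_POOL_TYPE = 'instant'   # selects NullPool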
+
+
+def init(database_url=None):
+ """Initialize database.
+
+ Adjust sqlalchemy logging if necessary.
+
+ :param database_url: string, database url.
+ """
+ global ENGINE
+ global SCOPED_SESSION
+ if not database_url:
+ database_url = setting.SQLALCHEMY_DATABASE_URI
+ logging.info('init database %s', database_url)
+ root_logger = logging.getLogger()
+ fine_debug = root_logger.isEnabledFor(logsetting.LOGLEVEL_MAPPING['fine'])
+ if fine_debug:
+ logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
+ finest_debug = root_logger.isEnabledFor(
+ logsetting.LOGLEVEL_MAPPING['finest']
+ )
+ if finest_debug:
+ logging.getLogger('sqlalchemy.dialects').setLevel(logging.INFO)
+ logging.getLogger('sqlalchemy.pool').setLevel(logging.INFO)
+ logging.getLogger('sqlalchemy.orm').setLevel(logging.INFO)
+ poolclass = POOL_MAPPING[setting.SQLALCHEMY_DATABASE_POOL_TYPE]
+ ENGINE = create_engine(
+ database_url, convert_unicode=True,
+ poolclass=poolclass
+ )
+ SESSION.configure(bind=ENGINE)
+ SCOPED_SESSION = scoped_session(SESSION)
+ models.BASE.query = SCOPED_SESSION.query_property()
+
+
+def in_session():
+    """Check if we are inside a database session scope."""
+    return bool(hasattr(SESSION_HOLDER, 'session'))
+
+
+@contextmanager
+def session(exception_when_in_session=True):
+ """database session scope.
+
+ To operate database, it should be called in database session.
+ If not exception_when_in_session, the with session statement support
+ nested session and only the out most session commit/rollback the
+ transaction.
+ """
+ if not ENGINE:
+ init()
+
+ nested_session = False
+ if hasattr(SESSION_HOLDER, 'session'):
+ if exception_when_in_session:
+ logging.error('we are already in session')
+ raise exception.DatabaseException('session already exist')
+ else:
+ new_session = SESSION_HOLDER.session
+ nested_session = True
+ logging.log(
+ logsetting.getLevelByName('fine'),
+                'reuse session %s', new_session
+ )
+ else:
+ new_session = SCOPED_SESSION()
+ setattr(SESSION_HOLDER, 'session', new_session)
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'enter session %s', new_session
+ )
+ try:
+ yield new_session
+ if not nested_session:
+ new_session.commit()
+ except Exception as error:
+ if not nested_session:
+ new_session.rollback()
+ logging.error('failed to commit session')
+ logging.exception(error)
+        if isinstance(error, IntegrityError):
+            table_name = ''
+            for item in error.statement.split():
+                if item.islower():
+                    table_name = item
+                    break
+            raise exception.DuplicatedRecord(
+                '%s in %s' % (error.orig, table_name)
+            )
+ elif isinstance(error, OperationalError):
+ raise exception.DatabaseException(
+ 'operation error in database'
+ )
+ elif isinstance(error, exception.DatabaseException):
+ raise error
+ else:
+ raise exception.DatabaseException(str(error))
+ finally:
+ if not nested_session:
+ new_session.close()
+ SCOPED_SESSION.remove()
+ delattr(SESSION_HOLDER, 'session')
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'exit session %s', new_session
+ )
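+
+# Example (illustrative sketch): typical usage of the session scope; the
+# query is an assumption.
+#
+#     with session() as db_session:
+#         clusters = db_session.query(models.Cluster).all()
+#     # commit happens on normal exit, rollback on exception.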
+
+
+def current_session():
+ """Get the current session scope when it is called.
+
+ :return: database session.
+ :raises: DatabaseException when it is not in session.
+ """
+ try:
+ return SESSION_HOLDER.session
+ except Exception as error:
+ logging.error('It is not in the session scope')
+ logging.exception(error)
+ if isinstance(error, exception.DatabaseException):
+ raise error
+ else:
+ raise exception.DatabaseException(str(error))
+
+
+def run_in_session(exception_when_in_session=True):
+ """Decorator to make sure the decorated function run in session.
+
+ When not exception_when_in_session, the run_in_session can be
+ decorated several times.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ my_session = kwargs.get('session')
+ if my_session is not None:
+ return func(*args, **kwargs)
+ else:
+ with session(
+ exception_when_in_session=exception_when_in_session
+ ) as my_session:
+ kwargs['session'] = my_session
+ return func(*args, **kwargs)
+ except Exception as error:
+ logging.error(
+ 'got exception with func %s args %s kwargs %s',
+ func, args, kwargs
+ )
+ logging.exception(error)
+ raise error
+ return wrapper
+ return decorator
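+
+# Example (illustrative sketch): decorated functions can be called with
+# or without an explicit session; one is created when omitted. The
+# function below is an assumption.
+#
+#     @run_in_session()
+#     def count_clusters(session=None):
+#         return session.query(models.Cluster).count()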
+
+
+def _setup_user_table(user_session):
+ """Initia