diff options
Diffstat (limited to 'compass-deck/bin')
27 files changed, 2733 insertions, 0 deletions
diff --git a/compass-deck/bin/README.md b/compass-deck/bin/README.md new file mode 100644 index 0000000..7052059 --- /dev/null +++ b/compass-deck/bin/README.md @@ -0,0 +1,66 @@ +Compass Binaries and Scripts +============================= + +bin/ contains compass heavy-lifting utility scripts and binaries. These scripts are often called by different components. Some are from core python modules and some are from compass daemon and other services. Most files in `bin/` are placed under `/opt/compass/bin/` after install.sh is complete. Some of them will go to `/usr/bin/` or `/etc/init.d/` as system binaries or services. + +###Directories and Files + +Below is a walkthrough of all directories and files. + + * ansible-callbacks/ - contains callback scripts for ansible installer. + * playbook_done.py - triggered by ansible when all playbooks are successfully executed. + Then the script will call compass API to report ansible "complete" status. + * chef/ - utility scripts for chef installer. + * addcookbooks.py - upload all chef cookbooks to the chef server. + * adddatabags.py - (deprecated) upload all chef databags to the chef server. + * addroles.py - upload all chef roles to the chef server. + * clean_clients.sh - remove all chef clients on the chef server. + * clean_environments.sh - remove all chef environments on the chef server. + * clean_nodes.sh - remove all chef nodes on the chef server. + * cobbler/ - utility scripts for cobbler installer + * remove_systems.sh - remove all systems on the cobbler server. + * clean_installation_logs.py - remove all the installation logs. + * clean_installers.py - remove all configurations and data from all installers. + * client.sh - sample client script to call client.py + * client.py - compass python client that calls API and deploy a cluster based on given configurations. + * compass_check.py - binary file that is placed as /usr/bin/compass. This is the main entrance of compass check CLI. 
+ * compassd - (deprecated) old compass daemon file + * compass_wsgi.py - compass wsgi module. + * csvdeploy.py - script that enable the deployment of clusters from spreadsheets. + * delete_clusters.py - script that deletes all given clusters and their underlying hosts. + * manage_db.py - utility binary that manages database. + * poll_switch.py - utility script to poll machine mac addresses that are connected to certain switches. + * progress_update.py - main script to run as a service to update hosts installing progresses. + * query_switch.py - independent script to query a switch. + * refresh.sh - refresh compass-db, restart compass services and clean up all installers. + * runserver.py - manually run a compass server instance. + * switch_virtualenv.py.template - template of switch_virtualenv.py. This script enables switching between python + virtual environments. + +###Script Location and Calling Modules +Script name | Location | Called by +--- | --- | --- +ansible-callbacks/playbook_done.py | /opt/compass/bin/ansible-callbacks/playbookd_done.py | ***ansible-playbook*** +chef/addcookbooks.py | /opt/compass/bin/addcookbooks.py | ***install/chef.sh*** +chef/adddatabags.py(deprecated) | /opt/compass/bin/addcookbooks.py | None +chef/addroles.py | /opt/compass/bin/addroles.py | ***install/chef.sh*** +chef/clean_clients.sh | /opt/compass/bin/clean_clients.sh | ***compass.tasks.clean_package_installer*** +chef/clean_environments.sh | /opt/compass/bin/clean_environments.sh | ***compass.tasks.clean_package_installer*** +chef/clean_nodes.sh | /opt/compass/bin/clean_nodes.sh | ***compass.tasks.clean_package_installer*** +cobbler/remove_systems.sh | /opt/compass/bin/remove_systems.sh | ***compass.tasks.clean_os_installer*** +clean_installation_logs.py | /opt/compass/bin/clean_installation_logs.py | ***bin/refresh.sh*** +clean_installers.py | /opt/compass/bin/clean_installers.py | ***bin/refresh.sh*** +client.sh | /opt/compass/bin/client.sh | sample client +client.py | 
/opt/compass/bin/client.py | ***regtest/regtest.sh*** +compsas_check.py | /opt/compass/bin/compass_check.py | ***compass check cli*** +compassd(deprecated) | None | None +compass_wsgi.py | /var/www/compass/compass.wsgi | ***Apache daemon*** +csvdeploy.py | /opt/compass/bin/csvdeploy.py | command-line script +delete_clusters.py | /opt/compass/bin/delete_clusters.py | command-line script +manage_db.py | /opt/compass/bin/manage_db.py | ***install/compass.sh*** and command-line script +poll_switch.py | /opt/compass/bin/poll_switch.py | command-line script +progress_update.py | /opt/compass/bin/progress_update.py | ***compass-progress-updated daemon*** +query_switch.py | /opt/compass/bin/query_switch.py | command-line script +refresh.sh | /opt/compass/bin/refresh.sh | command-line script +runserver.py | /opt/compass/bin/runserver.py | command-line script +switch_virtualenv.py.template | /opt/compass/bin/switch_virtualenv.py | ***all scripts using this library*** diff --git a/compass-deck/bin/ansible_callbacks/playbook_done.py b/compass-deck/bin/ansible_callbacks/playbook_done.py new file mode 100755 index 0000000..23d75a9 --- /dev/null +++ b/compass-deck/bin/ansible_callbacks/playbook_done.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Ansible playbook callback after a playbook run has completed.""" +import logging +import os +import simplejson as json +import sys + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir + '/..') + +import switch_virtualenv + +from compass.apiclient.restful import Client +from compass.utils import flags + +flags.add('compass_server', + help='compass server url', + default='http://127.0.0.1/api') +flags.add('compass_user_email', + help='compass user email', + default='admin@huawei.com') +flags.add('compass_user_password', + help='compass user password', + default='admin') + + +class CallbackModule(object): + def __init__(self): + self.disabled = False + try: + self.client = self._get_client() + except Exception: + self.disabled = True + logging.error("No compass server found" + "disabling this plugin") + + def _get_client(self): + return Client(flags.OPTIONS.compass_server) + + def _login(self, client): + """get apiclient token.""" + status, resp = client.get_token( + flags.OPTIONS.compass_user_email, + flags.OPTIONS.compass_user_password + ) + logging.info( + 'login status: %s, resp: %s', + status, resp + ) + if status >= 400: + raise Exception( + 'failed to login %s with user %s', + flags.OPTIONS.compass_server, + flags.OPTIONS.compass_user_email + ) + return resp['token'] + + def playbook_on_stats(self, stats): + hosts = sorted(stats.processed.keys()) + host_vars = self.playbook.inventory.get_variables(hosts[0]) + cluster_name = host_vars['cluster_name'] + + failures = False + unreachable = False + + for host in hosts: + summary = stats.summarize(host) + + if summary['failures'] > 0: + failures = True + if summary['unreachable'] > 0: + unreachable = True + + if failures or unreachable: + return + + self._login(self.client) + + for host in hosts: + clusterhost_name = host + "." 
+ cluster_name + self.client.clusterhost_ready(clusterhost_name) diff --git a/compass-deck/bin/chef/addcookbooks.py b/compass-deck/bin/chef/addcookbooks.py new file mode 100755 index 0000000..f23dac4 --- /dev/null +++ b/compass-deck/bin/chef/addcookbooks.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""import cookbooks to chef server.""" +import logging +import os +import os.path +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + + +from compass.utils import flags +from compass.utils import logsetting + + +flags.add('cookbooks_dir', + help='chef cookbooks directory', + default='/var/chef/cookbooks') + + +def main(): + """main entry.""" + flags.init() + logsetting.init() + cookbooks_dir = flags.OPTIONS.cookbooks_dir + logging.info('add cookbooks %s', cookbooks_dir) + cmd = "knife cookbook upload --all --cookbook-path %s" % cookbooks_dir + status = os.system(cmd) + logging.info('run cmd %s returns %s', cmd, status) + if status: + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/compass-deck/bin/chef/adddatabags.py b/compass-deck/bin/chef/adddatabags.py new file mode 100755 index 0000000..ba2d08c --- /dev/null +++ b/compass-deck/bin/chef/adddatabags.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. 
Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""import databags to chef server.""" +import logging +import os +import os.path +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + +from compass.utils import flags +from compass.utils import logsetting + + +flags.add('databags_dir', + help='chef databags directory', + default='/var/chef/databags') + + +def main(): + """main entry.""" + flags.init() + logsetting.init() + databags = [] + databags_dir = flags.OPTIONS.databags_dir + for item in os.listdir(databags_dir): + databags.append(item) + + for databag in databags: + logging.info('add databag %s', databag) + cmd = "knife data bag create %s" % databag + os.system(cmd) + databag_items = [] + databagitem_dir = os.path.join(databags_dir, databag) + for item in os.listdir(databagitem_dir): + if item.endswith('.json'): + databag_items.append(os.path.join(databagitem_dir, item)) + else: + logging.info('ignore %s in %s', item, databagitem_dir) + + for databag_item in databag_items: + logging.info('add databag item %s to databag %s', + databag_item, databag) + cmd = 'knife data bag from file %s %s' % (databag, databag_item) + status = os.system(cmd) + logging.info('run cmd %s returns %s', cmd, status) + if status: + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/compass-deck/bin/chef/addroles.py b/compass-deck/bin/chef/addroles.py new file mode 100755 index 
0000000..2745506 --- /dev/null +++ b/compass-deck/bin/chef/addroles.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""script to import roles to chef server.""" +import logging +import os +import os.path +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + +from compass.utils import flags +from compass.utils import logsetting + + +flags.add('roles_dir', + help='chef roles directory', + default='/var/chef/roles') + + +def main(): + """main entry.""" + flags.init() + logsetting.init() + rolelist = [] + roles_dir = flags.OPTIONS.roles_dir + + for item in os.listdir(roles_dir): + if item.endswith('.rb') or item.endswith('.json'): + rolelist.append(os.path.join(roles_dir, item)) + else: + logging.info('ignore %s in %s', item, roles_dir) + + for role in rolelist: + logging.info('add role %s', role) + cmd = "knife role from file %s" % role + status = os.system(cmd) + logging.info('run cmd %s returns %s', cmd, status) + if status: + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/compass-deck/bin/chef/clean_clients.sh b/compass-deck/bin/chef/clean_clients.sh new file mode 100755 index 0000000..7a26bea --- /dev/null +++ b/compass-deck/bin/chef/clean_clients.sh @@ -0,0 +1,6 @@ +#!/bin/bash +echo "clean chef clients" +yes | knife client bulk delete '^(?!chef-).*' +if [[ "$?" 
!= "0" ]]; then + echo "failed to clean all clients" +fi diff --git a/compass-deck/bin/chef/clean_environments.sh b/compass-deck/bin/chef/clean_environments.sh new file mode 100755 index 0000000..f9b5052 --- /dev/null +++ b/compass-deck/bin/chef/clean_environments.sh @@ -0,0 +1,13 @@ +#!/bin/bash +echo "clean chef environments" +environments=$(knife environment list) +for environment in $environments; do + if [[ "$environment" != "_default" ]]; then + yes | knife environment delete $environment + if [[ "$?" != "0" ]]; then + echo "failed to delete environment $environment" + else + echo "environment $environment is deleted" + fi + fi +done diff --git a/compass-deck/bin/chef/clean_nodes.sh b/compass-deck/bin/chef/clean_nodes.sh new file mode 100755 index 0000000..8224b82 --- /dev/null +++ b/compass-deck/bin/chef/clean_nodes.sh @@ -0,0 +1,6 @@ +#!/bin/bash +echo "clean chef nodes" +yes | knife node bulk delete '.*' +if [[ "$?" != "0" ]]; then + echo "failed to clean all nodes" +fi diff --git a/compass-deck/bin/clean_installation_logs.py b/compass-deck/bin/clean_installation_logs.py new file mode 100755 index 0000000..0ae20f1 --- /dev/null +++ b/compass-deck/bin/clean_installation_logs.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""clean all installation logs.""" +import logging +import os +import os.path +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + +from compass.utils import flags +from compass.utils import logsetting +from compass.utils import setting_wrapper as setting + + +def clean_installation_logs(): + installation_log_dirs = setting.INSTALLATION_LOGDIR + successful = True + for _, logdir in installation_log_dirs.items(): + cmd = 'rm -rf %s/*' % logdir + status = os.system(cmd) + logging.info('run cmd %s resturns %s', cmd, status) + if status: + successful = False + return successful + + +if __name__ == "__main__": + flags.init() + logsetting.init() + clean_installation_logs() diff --git a/compass-deck/bin/clean_installers.py b/compass-deck/bin/clean_installers.py new file mode 100755 index 0000000..ae6dab2 --- /dev/null +++ b/compass-deck/bin/clean_installers.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Scripts to delete cluster and it hosts""" +import logging +import os +import os.path +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + +from compass.actions import clean +from compass.db.api import adapter_holder as adapter_api +from compass.db.api import database +from compass.db.api import user as user_api +from compass.tasks.client import celery +from compass.utils import flags +from compass.utils import logsetting +from compass.utils import setting_wrapper as setting + + +flags.add_bool('async', + help='run in async mode', + default=True) + +flags.add('os_installers', + help='comma seperated os installers', + default='') +flags.add('package_installers', + help='comma separated package installers', + default='') + + +def clean_installers(): + os_installers = [ + os_installer + for os_installer in flags.OPTIONS.os_installers.split(',') + if os_installer + ] + package_installers = [ + package_installer + for package_installer in flags.OPTIONS.package_installers.split(',') + if package_installer + ] + user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL) + adapters = adapter_api.list_adapters(user=user) + filtered_os_installers = {} + filtered_package_installers = {} + for adapter in adapters: + logging.info( + 'got adapter: %s', adapter + ) + if 'os_installer' in adapter: + os_installer = adapter['os_installer'] + os_installer_name = os_installer['alias'] + if not os_installers or os_installer_name in os_installers: + filtered_os_installers[os_installer_name] = os_installer + else: + logging.info( + 'ignore os installer %s', os_installer_name + ) + else: + logging.info( + 'cannot find os installer in adapter %s', + adapter['name'] + ) + if 'package_installer' in adapter: + package_installer = adapter['package_installer'] + package_installer_name = package_installer['alias'] + if ( + not package_installers or + package_installer_name in package_installers + ): + 
filtered_package_installers[package_installer_name] = ( + package_installer + ) + else: + logging.info( + 'ignore package installer %s', package_installer_name + ) + else: + logging.info( + 'cannot find package installer in adapter %s', + adapter['name'] + ) + logging.info( + 'clean os installers: %s', filtered_os_installers.keys() + ) + logging.info( + 'clean package installers: %s', filtered_package_installers.keys() + ) + if flags.OPTIONS.async: + for os_installer_name, os_installer in filtered_os_installers.items(): + celery.send_task( + 'compass.tasks.clean_os_installer', + ( + os_installer['name'], + os_installer['settings'] + ) + ) + for package_installer_name, package_installer in ( + filtered_package_installers.items() + ): + celery.send_task( + 'compass.tasks.clean_package_installer', + ( + package_installer['name'], + package_installer['settings'] + ) + ) + else: + for os_installer_name, os_installer in ( + filtered_os_installers.items() + ): + try: + clean.clean_os_installer( + os_installer['name'], + os_installer['settings'] + ) + except Exception as error: + logging.error( + 'failed to clean os installer %s', os_installer_name + ) + logging.exception(error) + for package_installer_name, package_installer in ( + filtered_package_installers.items() + ): + try: + clean.clean_package_installer( + package_installer['name'], + package_installer['settings'] + ) + except Exception as error: + logging.error( + 'failed to clean package installer %s', + package_installer_name + ) + logging.exception(error) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + database.init() + clean_installers() diff --git a/compass-deck/bin/client.py b/compass-deck/bin/client.py new file mode 100755 index 0000000..d8eb59f --- /dev/null +++ b/compass-deck/bin/client.py @@ -0,0 +1,1006 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. 
Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""binary to deploy a cluster by compass client api.""" +from collections import defaultdict +import itertools +import json +import netaddr +import os +import re +import requests +from restful import Client +import socket +import sys +import time +import yaml + +ROLE_UNASSIGNED = True +ROLE_ASSIGNED = False + +import log as logging +LOG = logging.getLogger(__name__) + +from oslo_config import cfg +CONF = cfg.CONF + + +def byteify(input): + if isinstance(input, dict): + return dict([(byteify(key), byteify(value)) + for key, value in input.iteritems()]) + elif isinstance(input, list): + return [byteify(element) for element in input] + elif isinstance(input, unicode): + return input.encode('utf-8') + else: + return input + +opts = [ + cfg.StrOpt( + 'compass_server', + help='compass server url', + default='http://127.0.0.1/api' + ), + cfg.StrOpt( + 'compass_user_email', + help='compass user email', + default='admin@huawei.com' + ), + cfg.StrOpt( + 'compass_user_password', + help='compass user password', + default='admin' + ), + cfg.StrOpt( + 'switch_ips', + help='comma seperated switch ips', + default='' + ), + cfg.StrOpt( + 'switch_credential', + help='comma separated <credential key>=<credential value>', + default='version=2c,community=public' + ), + cfg.IntOpt( + 'switch_max_retries', + help='max retries of poll switch', + default=10 + ), + cfg.IntOpt( + 'switch_retry_interval', + help='interval to repoll switch', + 
default=10 + ), + cfg.BoolOpt( + 'poll_switches', + help='if the client polls switches', + default=True + ), + cfg.StrOpt( + 'machines', + help='comma separated mac addresses of machines', + default='' + ), + cfg.StrOpt( + 'subnets', + help='comma seperated subnets', + default='' + ), + cfg.StrOpt( + 'adapter_name', + help='adapter name', + default='' + ), + cfg.StrOpt( + 'adapter_os_pattern', + help='adapter os name', + default=r'^(?i)centos.*' + ), + cfg.StrOpt( + 'adapter_target_system_pattern', + help='adapter target system name', + default='^openstack$' + ), + cfg.StrOpt( + 'adapter_flavor_pattern', + help='adapter flavor name', + default='allinone' + ), + cfg.StrOpt( + 'cluster_name', + help='cluster name', + default='cluster1' + ), + cfg.StrOpt( + 'language', + help='language', + default='EN' + ), + cfg.StrOpt( + 'timezone', + help='timezone', + default='GMT' + ), + cfg.StrOpt( + 'http_proxy', + help='http proxy', + default='' + ), + cfg.StrOpt( + 'https_proxy', + help='https proxy', + default='' + ), + cfg.StrOpt( + 'no_proxy', + help='no proxy', + default='' + ), + cfg.StrOpt( + 'ntp_server', + help='ntp server', + default='' + ), + cfg.StrOpt( + 'dns_servers', + help='dns servers', + default='' + ), + cfg.StrOpt( + 'domain', + help='domain', + default='' + ), + cfg.StrOpt( + 'search_path', + help='search path', + default='' + ), + cfg.StrOpt( + 'local_repo_url', + help='local repo url', + default='' + ), + cfg.StrOpt( + 'default_gateway', + help='default gateway', + default='' + ), + cfg.StrOpt( + 'server_credential', + help=( + 'server credential formatted as ' + '<username>=<password>' + ), + default='root=root' + ), + cfg.StrOpt( + 'os_config_json_file', + help='json formatted os config file', + default='' + ), + cfg.StrOpt( + 'service_credentials', + help=( + 'comma seperated service credentials formatted as ' + '<servicename>:<username>=<password>,...' 
+ ), + default='' + ), + cfg.StrOpt( + 'console_credentials', + help=( + 'comma seperated console credential formated as ' + '<consolename>:<username>=<password>' + ), + default='' + ), + cfg.StrOpt( + 'hostnames', + help='comma seperated hostname', + default='' + ), + cfg.StrOpt( + 'host_networks', + help=( + 'semicomma seperated host name and its networks ' + '<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscuous>,...' + ), + default='' + ), + cfg.StrOpt( + 'partitions', + help=( + 'comma seperated partitions ' + '<partition name>=<partition_value>' + ), + default='tmp:percentage=10%,var:percentage=30%,home:percentage=30%' + ), + cfg.StrOpt( + 'network_mapping', + help=( + 'comma seperated network mapping ' + '<network_type>=<interface_name>' + ), + default='' + ), + cfg.StrOpt( + 'package_config_json_file', + help='json formatted os config file', + default='' + ), + cfg.StrOpt( + 'host_roles', + help=( + 'semicomma separated host roles ' + '<hostname>=<comma separated roles>' + ), + default='' + ), + cfg.StrOpt( + 'default_roles', + help=( + 'comma seperated default roles ' + '<rolename>' + ), + default='' + ), + cfg.IntOpt( + 'action_timeout', + help='action timeout in seconds', + default=60 + ), + cfg.IntOpt( + 'deployment_timeout', + help='deployment timeout in minutes', + default=60 + ), + cfg.IntOpt( + 'progress_update_check_interval', + help='progress update status check interval in seconds', + default=60 + ), + cfg.StrOpt( + 'dashboard_url', + help='dashboard url', + default='' + ), + cfg.StrOpt( + 'dashboard_link_pattern', + help='dashboard link pattern', + default=r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)' + ), + cfg.StrOpt( + 'cluster_vip', + help='cluster ip address', + default='' + ), + cfg.StrOpt( + 'enable_secgroup', + help='enable security group', + default='true' + ), + cfg.StrOpt( + 'network_cfg', + help='netowrk config file', + default='' + ), + cfg.StrOpt( + 'neutron_cfg', + help='netowrk config file', + default='' + ), + cfg.StrOpt( + 
'cluster_pub_vip', + help='cluster ip address', + default='' + ), + cfg.StrOpt( + 'cluster_prv_vip', + help='cluster ip address', + default='' + ), + cfg.StrOpt( + 'repo_name', + help='repo name', + default='' + ), + cfg.StrOpt( + 'deploy_type', + help='deploy type', + default='virtual' + ), +] +CONF.register_cli_opts(opts) + + +def is_role_unassigned(role): + return role + + +def _load_config(config_filename): + if not config_filename: + return {} + with open(config_filename) as config_file: + content = config_file.read() + return json.loads(content) + + +class CompassClient(object): + def __init__(self): + LOG.info("xh: compass_server=%s" % CONF.compass_server) + self.client = Client(CONF.compass_server) + self.subnet_mapping = {} + self.role_mapping = {} + self.host_mapping = {} + self.host_ips = defaultdict(list) + self.host_roles = {} + + self.login() + + def is_ok(self, status): + if status < 300 and status >= 200: + return True + + def login(self): + status, resp = self.client.get_token( + CONF.compass_user_email, + CONF.compass_user_password + ) + + LOG.info( + 'login status: %s, resp: %s', + status, resp + ) + if self.is_ok(status): + return resp["token"] + else: + raise Exception( + 'failed to login %s with user %s', + CONF.compass_server, + CONF.compass_user_email + ) + + def get_machines(self): + status, resp = self.client.list_machines() + LOG.info( + 'get all machines status: %s, resp: %s', status, resp) + if not self.is_ok(status): + raise RuntimeError('failed to get machines') + + machines_to_add = list(set([ + machine for machine in CONF.machines.split(',') + if machine + ])) + + LOG.info('machines to add: %s', machines_to_add) + machines_db = [str(m["mac"]) for m in resp] + LOG.info('machines in db: %s', machines_db) + assert(set(machines_db) == set(machines_to_add)) + + return [m["id"] for m in resp] + + def get_adapter(self): + """get adapter.""" + status, resp = self.client.list_adapters(name=CONF.adapter_name) + LOG.info( + 'get all adapters 
status: %s, resp: %s', + status, resp + ) + + if not self.is_ok(status) or not resp: + raise RuntimeError('failed to get adapters') + + os_re = re.compile(CONF.adapter_os_pattern) + flavor_re = re.compile(CONF.adapter_flavor_pattern) + + adapter_id = None + os_id = None + flavor_id = None + adapter = None + + adapter = resp[0] + adapter_id = adapter['id'] + for supported_os in adapter['supported_oses']: + if not os_re or os_re.match(supported_os['name']): + os_id = supported_os['os_id'] + break + + if 'flavors' in adapter: + for flavor in adapter['flavors']: + if not flavor_re or flavor_re.match(flavor['name']): + flavor_id = flavor['id'] + break + + assert(os_id and flavor_id) + return (adapter_id, os_id, flavor_id) + + def add_subnets(self): + subnets = [ + subnet for subnet in CONF.subnets.split(',') + if subnet + ] + + assert(subnets) + + subnet_mapping = {} + for subnet in subnets: + try: + netaddr.IPNetwork(subnet) + except Exception: + raise RuntimeError('subnet %s format is invalid' % subnet) + + status, resp = self.client.add_subnet(subnet) + LOG.info( + 'add subnet %s status %s response %s', + subnet, + status, + resp + ) + if not self.is_ok(status): + raise RuntimeError('failed to add subnet %s' % subnet) + + subnet_mapping[resp['subnet']] = resp['id'] + + self.subnet_mapping = subnet_mapping + + def add_cluster(self, adapter_id, os_id, flavor_id): + """add a cluster.""" + cluster_name = CONF.cluster_name + assert(cluster_name) + status, resp = self.client.add_cluster( + cluster_name, adapter_id, + os_id, flavor_id) + + if not self.is_ok(status): + raise RuntimeError("add cluster failed") + + LOG.info( + 'add cluster %s status: %s resp:%s', + cluster_name, + status, + resp + ) + + if isinstance(resp, list): + cluster = resp[0] + else: + cluster = resp + + cluster_id = cluster['id'] + flavor = cluster.get('flavor', {}) + roles = flavor.get('roles', []) + + for role in roles: + if role.get('optional', False): + self.role_mapping[role['name']] = 
ROLE_ASSIGNED + else: + self.role_mapping[role['name']] = ROLE_UNASSIGNED + + return cluster_id + + def add_cluster_hosts(self, cluster_id, machines): + hostnames = [ + hostname for hostname in CONF.hostnames.split(',') + if hostname + ] + + assert(len(machines) == len(hostnames)) + + machines_dict = [] + for machine_id, hostname in zip(machines, hostnames): + machines_dict.append({ + 'machine_id': machine_id, + 'name': hostname + }) + + # add hosts to the cluster. + status, resp = self.client.add_hosts_to_cluster( + cluster_id, + {'machines': machines_dict}) + + LOG.info( + 'add machines %s to cluster %s status: %s, resp: %s', + machines_dict, + cluster_id, + status, + resp + ) + + if not self.is_ok(status): + raise RuntimeError("add host to cluster failed") + + for host in resp['hosts']: + self.host_mapping[host['hostname']] = host['id'] + + assert(len(self.host_mapping) == len(machines)) + + def set_cluster_os_config(self, cluster_id): + """set cluster os config.""" + os_config = {} + language = CONF.language + timezone = CONF.timezone + http_proxy = CONF.http_proxy + https_proxy = CONF.https_proxy + local_repo_url = CONF.local_repo_url + repo_name = CONF.repo_name + deploy_type = CONF.deploy_type + if not https_proxy and http_proxy: + https_proxy = http_proxy + + no_proxy = [ + no_proxy for no_proxy in CONF.no_proxy.split(',') + if no_proxy + ] + + compass_server = CONF.compass_server + if http_proxy: + for hostname, ips in self.host_ips.items(): + no_proxy.append(hostname) + no_proxy.extend(ips) + + ntp_server = CONF.ntp_server or compass_server + + dns_servers = [ + dns_server for dns_server in CONF.dns_servers.split(',') + if dns_server + ] + if not dns_servers: + dns_servers = [compass_server] + + domain = CONF.domain + if not domain: + raise Exception('domain is not defined') + + search_path = [ + search_path for search_path in CONF.search_path.split(',') + if search_path + ] + + if not search_path: + search_path = [domain] + + default_gateway = 
CONF.default_gateway + if not default_gateway: + raise Exception('default gateway is not defined') + + general_config = { + 'language': language, + 'timezone': timezone, + 'ntp_server': ntp_server, + 'dns_servers': dns_servers, + 'default_gateway': default_gateway + } + + if http_proxy: + general_config['http_proxy'] = http_proxy + if https_proxy: + general_config['https_proxy'] = https_proxy + if no_proxy: + general_config['no_proxy'] = no_proxy + if domain: + general_config['domain'] = domain + if search_path: + general_config['search_path'] = search_path + if local_repo_url: + general_config['local_repo'] = local_repo_url + if repo_name: + general_config['repo_name'] = repo_name + if deploy_type: + general_config['deploy_type'] = deploy_type + + os_config["general"] = general_config + + server_credential = CONF.server_credential + if '=' in server_credential: + server_username, server_password = server_credential.split('=', 1) + elif server_credential: + server_username = server_password = server_credential + else: + server_username = 'root' + server_password = 'root' + + os_config['server_credentials'] = { + 'username': server_username, + 'password': server_password + } + + partitions = [ + partition for partition in CONF.partitions.split(',') + if partition + ] + + partition_config = {} + for partition in partitions: + assert("=" in partition) + + partition_name, partition_value = partition.split('=', 1) + partition_name = partition_name.strip() + partition_value = partition_value.strip() + + assert(partition_name and partition_value) + + if partition_value.endswith('%'): + partition_type = 'percentage' + partition_value = int(partition_value[:-1]) + else: + partition_type = 'size' + + partition_config[partition_name] = { + partition_type: partition_value + } + + os_config['partition'] = partition_config + + """ + os_config_filename = CONF.os_config_json_file + if os_config_filename: + util.merge_dict( + os_config, _load_config(os_config_filename) + ) + """ + 
+ status, resp = self.client.update_cluster_config( + cluster_id, os_config=os_config) + LOG.info( + 'set os config %s to cluster %s status: %s, resp: %s', + os_config, cluster_id, status, resp) + if not self.is_ok(status): + raise RuntimeError('failed to set os config %s to cluster %s' + % (os_config, cluster_id)) + + def set_host_networking(self): + """set cluster hosts networking.""" + def get_subnet(ip_str): + try: + LOG.info("subnets: %s" % self.subnet_mapping.keys()) + ip = netaddr.IPAddress(ip_str) + for cidr, subnet_id in self.subnet_mapping.items(): + subnet = netaddr.IPNetwork(cidr) + if ip in subnet: + return True, subnet_id + + LOG.info("ip %s not in %s" % (ip_str, cidr)) + return False, None + except Exception: + LOG.exception("ip addr %s is invalid" % ip_str) + return False, None + + for host_network in CONF.host_networks.split(';'): + hostname, networks_str = host_network.split(':', 1) + hostname = hostname.strip() + networks_str = networks_str.strip() + + assert(hostname in self.host_mapping) + + host_id = self.host_mapping[hostname] + intf_list = networks_str.split(',') + for intf_str in intf_list: + interface, intf_properties = intf_str.split('=', 1) + intf_properties = intf_properties.strip().split('|') + + assert(intf_properties) + ip_str = intf_properties[0] + + status, subnet_id = get_subnet(ip_str) + if not status: + raise RuntimeError("ip addr %s is invalid" % ip_str) + + properties = dict([ + (intf_property, True) + for intf_property in intf_properties[1:] + ]) + + LOG.info( + 'add host %s interface %s ip %s network proprties %s', + hostname, interface, ip_str, properties) + + status, response = self.client.add_host_network( + host_id, interface, ip=ip_str, subnet_id=subnet_id, + **properties + ) + + LOG.info( + 'add host %s interface %s ip %s network properties %s ' + 'status %s: %s', + hostname, interface, ip_str, properties, + status, response + ) + + if not self.is_ok(status): + raise RuntimeError("add host network failed") + + 
self.host_ips[hostname].append(ip_str) + + def set_cluster_package_config(self, cluster_id): + """set cluster package config.""" + package_config = {"security": {}} + + service_credentials = [ + service_credential + for service_credential in CONF.service_credentials.split(',') + if service_credential + ] + + service_credential_cfg = {} + LOG.info( + 'service credentials: %s', service_credentials + ) + + for service_credential in service_credentials: + if ':' not in service_credential: + raise Exception( + 'no : in service credential %s' % service_credential + ) + service_name, service_pair = service_credential.split(':', 1) + if '=' not in service_pair: + raise Exception( + 'there is no = in service %s security' % service_name + ) + + username, password = service_pair.split('=', 1) + service_credential_cfg[service_name] = { + 'username': username, + 'password': password + } + + console_credentials = [ + console_credential + for console_credential in CONF.console_credentials.split(',') + if console_credential + ] + + LOG.info( + 'console credentials: %s', console_credentials + ) + + console_credential_cfg = {} + for console_credential in console_credentials: + if ':' not in console_credential: + raise Exception( + 'there is no : in console credential %s' + % console_credential + ) + console_name, console_pair = console_credential.split(':', 1) + if '=' not in console_pair: + raise Exception( + 'there is no = in console %s security' % console_name + ) + username, password = console_pair.split('=', 1) + console_credential_cfg[console_name] = { + 'username': username, + 'password': password + } + + package_config["security"] = { + "service_credentials": service_credential_cfg, + "console_credentials": console_credential_cfg + } + + network_mapping = dict([ + network_pair.split('=', 1) + for network_pair in CONF.network_mapping.split(',') + if '=' in network_pair + ]) + + package_config['network_mapping'] = network_mapping + + assert(os.path.exists(CONF.network_cfg)) + 
network_cfg = yaml.load(open(CONF.network_cfg)) + package_config["network_cfg"] = network_cfg + + assert(os.path.exists(CONF.neutron_cfg)) + neutron_cfg = yaml.load(open(CONF.neutron_cfg)) + package_config["neutron_config"] = neutron_cfg + + """ + package_config_filename = CONF.package_config_json_file + if package_config_filename: + util.merge_dict( + package_config, _load_config(package_config_filename) + ) + """ + package_config['ha_proxy'] = {} + if CONF.cluster_vip: + package_config["ha_proxy"]["vip"] = CONF.cluster_vip + + package_config['enable_secgroup'] = (CONF.enable_secgroup == "true") + + status, resp = self.client.update_cluster_config( + cluster_id, package_config=package_config) + LOG.info( + 'set package config %s to cluster %s status: %s, resp: %s', + package_config, cluster_id, status, resp) + + if not self.is_ok(status): + raise RuntimeError("set cluster package_config failed") + + def set_host_roles(self, cluster_id, host_id, roles): + status, response = self.client.update_cluster_host( + cluster_id, host_id, roles=roles) + + LOG.info( + 'set cluster %s host %s roles %s status %s: %s', + cluster_id, host_id, roles, status, response + ) + + if not self.is_ok(status): + raise RuntimeError("set host roles failed") + + for role in roles: + if role in self.role_mapping: + self.role_mapping[role] = ROLE_ASSIGNED + + def set_all_hosts_roles(self, cluster_id): + for host_str in CONF.host_roles.split(';'): + host_str = host_str.strip() + hostname, roles_str = host_str.split('=', 1) + + assert(hostname in self.host_mapping) + host_id = self.host_mapping[hostname] + + roles = [role.strip() for role in roles_str.split(',') if role] + + self.set_host_roles(cluster_id, host_id, roles) + self.host_roles[hostname] = roles + + unassigned_hostnames = list(set(self.host_mapping.keys()) + - set(self.host_roles.keys())) + + unassigned_roles = [role for role, status in self.role_mapping.items() + if is_role_unassigned(status)] + + assert(len(unassigned_hostnames) >= 
len(unassigned_roles))
+
+        for hostname, role in map(
+            None,
+            unassigned_hostnames,
+            unassigned_roles
+        ):
+            host_id = self.host_mapping[hostname]
+            self.set_host_roles(cluster_id, host_id, [role])
+            self.host_roles[hostname] = [role]
+
+        unassigned_hostnames = list(set(self.host_mapping.keys())
+                                    - set(self.host_roles.keys()))
+
+        if not unassigned_hostnames:
+            return
+
+        # assign default roles to unassigned hosts
+        default_roles = [
+            role for role in CONF.default_roles.split(',')
+            if role
+        ]
+
+        assert(default_roles)
+
+        cycle_roles = itertools.cycle(default_roles)
+        for hostname in unassigned_hostnames:
+            host_id = self.host_mapping[hostname]
+            roles = [cycle_roles.next()]
+            self.set_host_roles(cluster_id, host_id, roles)
+            self.host_roles[hostname] = roles
+
+    def deploy_clusters(self, cluster_id):
+        host_ids = self.host_mapping.values()
+
+        status, response = self.client.review_cluster(
+            cluster_id, review={'hosts': host_ids}
+        )
+        LOG.info(
+            'review cluster %s hosts %s, status %s: %s',
+            cluster_id, host_ids, status, response
+        )
+
+        # TODO: clarify what the review step validates before deploy
+        if not self.is_ok(status):
+            raise RuntimeError("review cluster host failed")
+
+        status, response = self.client.deploy_cluster(
+            cluster_id, deploy={'hosts': host_ids}
+        )
+        LOG.info(
+            'deploy cluster %s hosts %s status %s: %s',
+            cluster_id, host_ids, status, response
+        )
+
+        if not self.is_ok(status):
+            raise RuntimeError("deploy cluster failed")
+
+    def get_installing_progress(self, cluster_id):
+        """get installing progress."""
+        action_timeout = time.time() + 60 * float(CONF.action_timeout)
+        deployment_timeout = time.time() + 60 * float(
+            CONF.deployment_timeout)
+
+        current_time = time.time()
+        deployment_failed = True
+        while time.time() < deployment_timeout:
+            status, cluster_state = self.client.get_cluster_state(cluster_id)
+            LOG.info(
+                'get cluster %s state status %s: %s',
+                cluster_id, status, cluster_state
+            )
+            if not self.is_ok(status):
+                raise RuntimeError("can not get cluster state")
+
+            if cluster_state['state'] in ['UNINITIALIZED', 'INITIALIZED']:
+                if time.time() >= action_timeout:
+                    deployment_failed = True
+                    break
+                else:
+                    continue
+
+            elif cluster_state['state'] == 'SUCCESSFUL':
+                deployment_failed = False
+                break
+            elif cluster_state['state'] == 'ERROR':
+                deployment_failed = True
+                break
+
+        if deployment_failed:
+            raise RuntimeError("deploy cluster failed")
+
+    def check_dashboard_links(self, cluster_id):
+        dashboard_url = CONF.dashboard_url
+        if not dashboard_url:
+            LOG.info('no dashboard url set')
+            return
+        dashboard_link_pattern = re.compile(
+            CONF.dashboard_link_pattern)
+        r = requests.get(dashboard_url, verify=False)
+        r.raise_for_status()
+        match = dashboard_link_pattern.search(r.text)
+        if match:
+            LOG.info(
+                'dashboard login page for cluster %s can be downloaded',
+                cluster_id)
+        else:
+            msg = (
+                '%s failed to be downloaded\n'
+                'the context is:\n%s\n'
+            ) % (dashboard_url, r.text)
+            raise Exception(msg)
+
+
+def main():
+    client = CompassClient()
+    machines = client.get_machines()
+
+    LOG.info('machines are %s', machines)
+
+    client.add_subnets()
+    adapter_id, os_id, flavor_id = client.get_adapter()
+    cluster_id = client.add_cluster(adapter_id, os_id, flavor_id)
+
+    client.add_cluster_hosts(cluster_id, machines)
+    client.set_host_networking()
+    client.set_cluster_os_config(cluster_id)
+
+    if flavor_id:
+        client.set_cluster_package_config(cluster_id)
+
+    client.set_all_hosts_roles(cluster_id)
+    client.deploy_clusters(cluster_id)
+
+    client.get_installing_progress(cluster_id)
+    client.check_dashboard_links(cluster_id)
+
+if __name__ == "__main__":
+    CONF(args=sys.argv[1:])
+    main()
diff --git a/compass-deck/bin/client.sh b/compass-deck/bin/client.sh
new file mode 100755
index 0000000..48c70e2
--- /dev/null
+++ b/compass-deck/bin/client.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+/opt/compass/bin/client.py --switch_ips=172.29.8.40 --machines=00:0c:29:a7:ea:4b --adapter_name=os_only --adapter_flavor_pattern=
--subnets=10.145.88.0/23,172.16.0.0/16 --cluster_name=cluster1 --domain=ods.com --default_gateway=10.145.88.1 --service_credentials= --console_credentials= --hostnames=host1 --host_networks="host1:eth0=10.145.89.201|is_mgmt,eth1=172.16.100.201|is_promiscuous" --partitions="/var=50%,/home=30%" --network_mapping= --host_roles= --dashboard_url=
diff --git a/compass-deck/bin/cobbler/remove_systems.sh b/compass-deck/bin/cobbler/remove_systems.sh
new file mode 100755
index 0000000..1973d43
--- /dev/null
+++ b/compass-deck/bin/cobbler/remove_systems.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+systems=$(cobbler system list)
+echo "remove systems: $systems"
+for system in $systems; do
+    cobbler system remove --name=$system
+    if [[ "$?" != "0" ]]; then
+        echo "failed to remove system $system"
+    fi
+done
diff --git a/compass-deck/bin/compass_check.py b/compass-deck/bin/compass_check.py
new file mode 100755
index 0000000..5fc7e69
--- /dev/null
+++ b/compass-deck/bin/compass_check.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""compass health check.""" +import os +import os.path +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + +import compass.actions.cli as cli + +sys.exit(cli.main()) diff --git a/compass-deck/bin/compass_wsgi.py b/compass-deck/bin/compass_wsgi.py new file mode 100755 index 0000000..9e889e7 --- /dev/null +++ b/compass-deck/bin/compass_wsgi.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""compass wsgi module.""" +import os +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + +from compass.utils import flags +from compass.utils import logsetting +from compass.utils import setting_wrapper as setting + + +flags.init() +flags.OPTIONS.logfile = setting.WEB_LOGFILE +logsetting.init() + + +from compass.api import api as compass_api + + +compass_api.init() +application = compass_api.app diff --git a/compass-deck/bin/compassd b/compass-deck/bin/compassd new file mode 100755 index 0000000..fc77bb9 --- /dev/null +++ b/compass-deck/bin/compassd @@ -0,0 +1,43 @@ +#!/bin/sh + +RETVAL_CELERY=0 +RETVAL_PROGRESS_UPDATE=0 +start() { + service compass-celeryd start + RETVAL_CELERY=$? + service compass-progress-updated start + RETVAL_PROGRESS_UPDATE=$? +} + +stop() { + service compass-celeryd stop + RETVAL_CELERY=$? 
+ service compass-progress-updated stop + RETVAL_PROGRESS_UPDATE=$? +} + +restart() { + stop + start +} +case "$1" in + start|stop|restart) + $1 + ;; + status) + service compass-celeryd status + RETVAL_CELERY=$? + service compass-progress-updated status + RETVAL_PROGRESS_UPDATE=$? + ;; + *) + echo "Usage: $0 {start|stop|status|restart}" + exit 1 + ;; +esac +if [[ "$RETVAL_CELERY" != "0" ]]; then + exit $RETVAL_CELERY +fi +if [[ "$RETVAL_PROGRESS_UPDATE" != "0" ]]; then + exit $RETVAL_PROGRESS_UPDATE +fi diff --git a/compass-deck/bin/csvdeploy.py b/compass-deck/bin/csvdeploy.py new file mode 100755 index 0000000..23b0c46 --- /dev/null +++ b/compass-deck/bin/csvdeploy.py @@ -0,0 +1,333 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""deploy cluster from csv file.""" +import ast +import copy +import csv +import os +import re +import sys + +from multiprocessing import Process +from multiprocessing import Queue +from optparse import OptionParser + +try: + from compass.apiclient.restful import Client +except ImportError: + curr_dir = os.path.dirname(os.path.realpath(__file__)) + apiclient_dir = os.path.dirname(curr_dir) + '/compass/apiclient' + sys.path.append(apiclient_dir) + from restful import Client + + +DELIMITER = "," + +# Sqlite tables +TABLES = { + 'switch_config': {'columns': ['id', 'ip', 'filter_port']}, + 'switch': {'columns': ['id', 'ip', 'credential_data']}, + 'machine': {'columns': ['id', 'mac', 'port', 'vlan', 'switch_id']}, + 'cluster': {'columns': ['id', 'name', 'security_config', + 'networking_config', 'partition_config', + 'adapter_id', 'state']}, + 'cluster_host': {'columns': ['id', 'cluster_id', 'hostname', 'machine_id', + 'config_data', 'state']}, + 'adapter': {'columns': ['id', 'name', 'os', 'target_system']}, + 'role': {'columns': ['id', 'name', 'target_system', 'description']} +} + + +def start(csv_dir, compass_url): + """Start deploy both failed clusters and new clusters.""" + # Get clusters and hosts data from CSV + clusters_data = get_csv('cluster.csv', csv_dir) + hosts_data = get_csv('cluster_host.csv', csv_dir) + data = {} + for cluster in clusters_data: + tmp = {} + tmp['cluster_data'] = cluster + tmp['hosts_data'] = [] + data[cluster['id']] = tmp + + for host in hosts_data: + cluster_id = host['cluster_id'] + if cluster_id not in data: + print ("Unknown cluster_id=%s of the host! host_id=%s!" 
+ % (cluster_id, host['id'])) + sys.exit(1) + + data[cluster_id]['hosts_data'].append(host) + + apiClient = _APIClient(compass_url) + results_q = Queue() + ps = [] + for elem in data: + cluster_data = data[elem]['cluster_data'] + hosts_data = data[elem]['hosts_data'] + p = Process(target=apiClient.execute, + args=(cluster_data, hosts_data, results_q)) + ps.append(p) + p.start() + + for p in ps: + p.join() + + progress_file = '/'.join((csv_dir, 'progress.csv')) + write_progress_to_file(results_q, progress_file) + + +def write_progress_to_file(results_q, progress_file): + cluster_headers = ['cluster_id', 'progress_url'] + host_headers = ['host_id', 'progress_url'] + + with open(progress_file, 'wb') as f: + print "Writing all progress information to %s......" % progress_file + writer = csv.writer(f, delimiter=DELIMITER, quoting=csv.QUOTE_MINIMAL) + while not results_q.empty(): + record = results_q.get() + hosts = [] + cluster = [record['deployment']['cluster']['cluster_id'], + record['deployment']['cluster']['url']] + writer.writerow(cluster_headers) + writer.writerow(cluster) + + for elem in record['deployment']['hosts']: + host = [elem['host_id'], elem['url']] + hosts.append(host) + + writer.writerow(host_headers) + writer.writerows(hosts) + print "Done!\n" + + +def get_csv(fname, csv_dir): + """Parse csv files into python variables. + + .. note:: + all nested fields in db will be assembled. + + :param fname: CSV file name + :param csv_dir: CSV files directory + + :returns: list of dict which key is column name and value is its data. 
+ """ + headers = [] + rows = [] + file_dir = '/'.join((csv_dir, fname)) + with open(file_dir) as f: + reader = csv.reader(f, delimiter=DELIMITER, quoting=csv.QUOTE_MINIMAL) + headers = reader.next() + rows = [x for x in reader] + + result = [] + for row in rows: + data = {} + for col_name, value in zip(headers, row): + if re.match(r'^[\d]+$', value): + # the value should be an integer + value = int(value) + elif re.match(r'^\[(\'\w*\'){1}(\s*,\s*\'\w*\')*\]$', value): + # the value should be a list + value = ast.literal_eval(value) + elif value == 'None': + value = '' + + if col_name.find('.') > 0: + tmp_result = {} + tmp_result[col_name.split('.')[-1]] = value + keys = col_name.split('.')[::-1][1:] + for key in keys: + tmp = {} + tmp[key] = tmp_result + tmp_result = tmp + merge_dict(data, tmp_result) + else: + data[col_name] = value + + result.append(data) + + return result + + +def merge_dict(lhs, rhs, override=True): + """Merge nested right dict into left nested dict recursively. + + :param lhs: dict to be merged into. + :type lhs: dict + :param rhs: dict to merge from. + :type rhs: dict + :param override: the value in rhs overide the value in left if True. + :type override: str + + :raises: TypeError if lhs or rhs is not a dict. 
+ """ + if not rhs: + return + + if not isinstance(lhs, dict): + raise TypeError('lhs type is %s while expected is dict' % type(lhs), + lhs) + + if not isinstance(rhs, dict): + raise TypeError('rhs type is %s while expected is dict' % type(rhs), + rhs) + + for key, value in rhs.items(): + if isinstance(value, dict) and key in lhs and isinstance(lhs[key], + dict): + merge_dict(lhs[key], value, override) + else: + if override or key not in lhs: + lhs[key] = copy.deepcopy(value) + + +class _APIClient(Client): + def __init__(self, url, headers=None, proxies=None, stream=None): + super(_APIClient, self).__init__(url, headers, proxies, stream) + + def set_cluster_resource(self, cluster_id, resource, data): + url = "/clusters/%d/%s" % (cluster_id, resource) + return self._put(url, data=data) + + def execute(self, cluster_data, hosts_data, resp_results): + """The process includes creating or updating a cluster. + + The cluster configuration, add or update a host in the cluster, + and deploy the updated hosts. + + :param cluster_data: the dictionary of cluster data + """ + cluster_id = cluster_data['id'] + code, resp = self.get_cluster(cluster_id) + if code == 404: + # Create a new cluster + name = cluster_data['name'] + adapter_id = cluster_data['adapter_id'] + code, resp = self.add_cluster(name, adapter_id) + + if code != 200: + print ("Failed to create the cluster which name is " + "%s!\nError message: %s" % (name, resp['message'])) + sys.exit(1) + + # Update the config(security, networking, partition) of the cluster + security_req = {} + networking_req = {} + partition_req = {} + + security_req['security'] = cluster_data['security_config'] + networking_req['networking'] = cluster_data['networking_config'] + partition_req['partition'] = cluster_data['partition_config'] + + print "Update Security config......." 
+        code, resp = self.set_cluster_resource(cluster_id, 'security',
+                                               security_req)
+        if code != 200:
+            print ("Failed to update Security config for cluster id=%s!\n"
+                   "Error message: %s" % (cluster_id, resp['message']))
+            sys.exit(1)
+
+        print "Update Networking config......."
+        code, resp = self.set_cluster_resource(cluster_id, 'networking',
+                                               networking_req)
+        if code != 200:
+            print ("Failed to update Networking config for cluster id=%s!\n"
+                   "Error message: %s" % (cluster_id, resp['message']))
+            sys.exit(1)
+
+        print "Update Partition config......."
+        code, resp = self.set_cluster_resource(cluster_id, 'partition',
+                                               partition_req)
+        if code != 200:
+            print ("Failed to update Partition config for cluster id=%s!\n"
+                   "Error message: %s" % (cluster_id, resp['message']))
+            sys.exit(1)
+
+        deploy_list = []
+        deploy_hosts_data = []
+
+        machines_list = []
+        new_hosts_data = []
+        for record in hosts_data:
+            if record['state'] and int(record['deploy_action']):
+                deploy_list.append(record['id'])
+                deploy_hosts_data.append(record)
+
+            elif int(record['deploy_action']):
+                machines_list.append(record['machine_id'])
+                new_hosts_data.append(record)
+
+        if machines_list:
+            # add new hosts to the cluster
+            code, resp = self.add_hosts(cluster_id, machines_list)
+            if code != 200:
+                print ("Failed to add hosts to the cluster id=%s!\n"
+                       "Error message: %s.\nfailed hosts are %s"
+                       % (cluster_id, resp['message'], resp['failedMachines']))
+                sys.exit(1)
+
+            for record, host in zip(new_hosts_data, resp['cluster_hosts']):
+                record['id'] = host['id']
+                deploy_list.append(host['id'])
+                deploy_hosts_data.append(record)
+
+        # Update the config of each host in the cluster
+        for host in deploy_hosts_data:
+            req = {}
+            host_id = host['id']
+            print "Updating the config of host id=%s" % host['id']
+            req['hostname'] = host['hostname']
+            req.update(host['config_data'])
+            code, resp = self.update_host_config(int(host_id), raw_data=req)
+            if code != 200:
+                print ("Failed to update the config of the host id=%s!\n"
+                       "Error message: %s" % (host_id, resp['message']))
+                sys.exit(1)
+
+        # Start to deploy the cluster
+        print "Start to deploy the cluster!....."
+        deploy_req = {"deploy": deploy_list}
+        code, resp = self.deploy_hosts(cluster_id, raw_data=deploy_req)
+        print "---Cluster Info---"
+        print "cluster_id  url"
+        print (" %s   %s"
+               % (resp['deployment']['cluster']['cluster_id'],
+                  resp['deployment']['cluster']['url']))
+        print "---Hosts Info-----"
+        print "host_id  url"
+        for host in resp['deployment']['hosts']:
+            print " %s   %s" % (host['host_id'], host['url'])
+        print "---------------------------------------------------------------"
+        print "\n"
+        resp_results.put(resp)
+
+
+if __name__ == "__main__":
+    usage = "usage: %prog [options]"
+    parser = OptionParser(usage)
+
+    parser.add_option("-d", "--csv-dir", dest="csv_dir",
+                      help="The directory of CSV files used for deployment")
+    parser.add_option("-u", "--compass-url", dest="compass_url",
+                      help="The URL of Compass server")
+    (options, args) = parser.parse_args()
+
+    if not os.path.exists(options.csv_dir):
+        print "Cannot find the directory: %s" % options.csv_dir
+
+    start(options.csv_dir, options.compass_url)
diff --git a/compass-deck/bin/delete_clusters.py b/compass-deck/bin/delete_clusters.py
new file mode 100755
index 0000000..fddec17
--- /dev/null
+++ b/compass-deck/bin/delete_clusters.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Scripts to delete cluster and it hosts""" +import logging +import os +import os.path +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + + +from compass.db.api import cluster as cluster_api +from compass.db.api import database +from compass.db.api import host as host_api +from compass.db.api import user as user_api +from compass.utils import flags +from compass.utils import logsetting +from compass.utils import setting_wrapper as setting + + +flags.add('clusternames', + help='comma seperated cluster names', + default='') +flags.add_bool('delete_hosts', + help='if all hosts related to the cluster will be deleted', + default=False) + + +def delete_clusters(): + clusternames = [ + clustername + for clustername in flags.OPTIONS.clusternames.split(',') + if clustername + ] + user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL) + list_cluster_args = {} + if clusternames: + list_cluster_args['name'] = clusternames + clusters = cluster_api.list_clusters( + user=user, **list_cluster_args + ) + delete_underlying_host = flags.OPTIONS.delete_hosts + for cluster in clusters: + cluster_id = cluster['id'] + cluster_api.del_cluster( + cluster_id, True, False, delete_underlying_host, user=user + ) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + database.init() + delete_clusters() diff --git a/compass-deck/bin/manage_db.py b/compass-deck/bin/manage_db.py new file mode 100755 index 0000000..3e56433 --- /dev/null +++ b/compass-deck/bin/manage_db.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""utility binary to manage database.""" +import os +import os.path +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + +from flask.ext.script import Manager + +from compass.api import app +from compass.db.api import database +from compass.db.api import switch as switch_api +from compass.db.api import user as user_api +from compass.utils import flags +from compass.utils import logsetting +from compass.utils import setting_wrapper as setting +from compass.utils import util + + +flags.add('table_name', + help='table name', + default='') +flags.add('clusters', + help=( + 'clusters and hosts of each cluster, the format is as ' + 'clusterid:hostname1,hostname2,...;...'), + default='') +flags.add_bool('async', + help='ryn in async mode', + default=True) +flags.add('switch_machines_file', + help=( + 'files for switches and machines ' + 'connected to each switch. 
each line in the file ' + 'is machine,<switch ip>,<switch port>,<vlan>,<mac> ' + 'or switch,<switch_ip>,<switch_vendor>,' + '<switch_version>,<switch_community>,<switch_state>'), + default='') +flags.add('search_cluster_properties', + help='comma separated properties to search in cluster config', + default='') +flags.add('print_cluster_properties', + help='comma separated cluster config properties to print', + default='') +flags.add('search_host_properties', + help='comma separated properties to search in host config', + default='') +flags.add('print_host_properties', + help='comma separated host config properties to print', + default='') + + +app_manager = Manager(app, usage="Perform database operations") + + +TABLE_MAPPING = { +} + + +@app_manager.command +def list_config(): + "List the commands." + for key, value in app.config.items(): + print key, value + + +@app_manager.command +def checkdb(): + """check if db exists.""" + if setting.DATABASE_TYPE == 'file': + if os.path.exists(setting.DATABASE_FILE): + sys.exit(0) + else: + sys.exit(1) + + sys.exit(0) + + +@app_manager.command +def createdb(): + """Creates database from sqlalchemy models.""" + database.init() + try: + database.drop_db() + except Exception: + pass + + if setting.DATABASE_TYPE == 'file': + if os.path.exists(setting.DATABASE_FILE): + os.remove(setting.DATABASE_FILE) + database.create_db() + if setting.DATABASE_TYPE == 'file': + os.chmod(setting.DATABASE_FILE, 0o777) + + +@app_manager.command +def dropdb(): + """Drops database from sqlalchemy models.""" + database.init() + database.drop_db() + + +@app_manager.command +def set_switch_machines(): + """Set switches and machines. + + .. note:: + --switch_machines_file is the filename which stores all switches + and machines information. + each line in fake_switches_files presents one machine. 
+ the format of each line machine,<switch_ip>,<switch_port>,<vlan>,<mac> + or switch,<switch_ip>,<switch_vendor>,<switch_version>, + <switch_community>,<switch_state> + """ + if not flags.OPTIONS.switch_machines_file: + print 'flag --switch_machines_file is missing' + return + database.init() + switches, switch_machines = util.get_switch_machines_from_file( + flags.OPTIONS.switch_machines_file) + user = user_api.get_user_object( + setting.COMPASS_ADMIN_EMAIL + ) + switch_mapping = {} + for switch in switches: + added_switch = switch_api.add_switch( + False, user=user, **switch + ) + switch_mapping[switch['ip']] = added_switch['id'] + for switch_ip, machines in switch_machines.items(): + if switch_ip not in switch_mapping: + print 'switch ip %s not found' % switch_ip + sys.exit(1) + switch_id = switch_mapping[switch_ip] + for machine in machines: + switch_api.add_switch_machine( + switch_id, False, user=user, **machine + ) + + +if __name__ == "__main__": + flags.init() + logsetting.init() + app_manager.run() diff --git a/compass-deck/bin/poll_switch.py b/compass-deck/bin/poll_switch.py new file mode 100755 index 0000000..c61e1dd --- /dev/null +++ b/compass-deck/bin/poll_switch.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""main script to poll machines which is connected to the switches.""" +import functools +import logging +import os +import sys + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(current_dir) + + +import switch_virtualenv + +import lockfile +from multiprocessing import Pool + +from compass.actions import poll_switch +from compass.actions import util +from compass.db.api import database +from compass.db.api import switch as switch_api +from compass.db.api import user as user_api +from compass.tasks.client import celery +from compass.utils import daemonize +from compass.utils import flags +from compass.utils import logsetting +from compass.utils import setting_wrapper as setting + + +flags.add('switch_ips', + help='comma seperated switch ips', + default='') +flags.add_bool('async', + help='ryn in async mode', + default=True) +flags.add('thread_pool_size', type='int', + help='thread pool size when run in noasync mode', + default=4) +flags.add('run_interval', type='int', + help='run interval in seconds', + default=setting.POLLSWITCH_INTERVAL) + + +def pollswitches(switch_ips): + """poll switch.""" + user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL) + poll_switches = [] + all_switches = dict([ + (switch['ip'], switch['credentials']) + for switch in switch_api.list_switches(user=user) + ]) + if switch_ips: + poll_switches = dict([ + (switch_ip, all_switches[switch_ip]) + for switch_ip in switch_ips + if switch_ip in all_switches + ]) + else: + poll_switches = all_switches + + if flags.OPTIONS.async: + for switch_ip, switch_credentials in poll_switches.items(): + celery.send_task( + 'compass.tasks.pollswitch', + (user.email, switch_ip, switch_credentials) + ) + + else: + try: + pool = Pool(processes=flags.OPTIONS.thread_pool_size) + for switch_ip, switch_credentials in poll_switches.items(): + pool.apply_async( + poll_switch.poll_switch, + (user.email, switch_ip, switch_credentials) + ) + pool.close() + pool.join() + except 
Exception as error: + logging.error('failed to poll switches %s', + poll_switches) + logging.exception(error) + + +if __name__ == '__main__': + flags.init() + logsetting.init() + database.init() + logging.info('run poll_switch') + daemonize.daemonize( + functools.partial( + pollswitches, + [switch_ip + for switch_ip in flags.OPTIONS.switch_ips.split(',') + if switch_ip]), + flags.OPTIONS.run_interval, + pidfile=lockfile.FileLock('/var/run/poll_switch.pid'), + stderr=open('/tmp/poll_switch_err.log', 'w+'), + stdout=open('/tmp/poll_switch_out.log', 'w+')) diff --git a/compass-deck/bin/progress_update.py b/compass-deck/bin/progress_update.py new file mode 100755 index 0000000..cc8c12b --- /dev/null +++ b/compass-deck/bin/progress_update.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# +# Copyright 2014 Huawei Technologies Co. Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""main script to run as service to update hosts installing progress."""
+import functools
+import logging
+import os
+import sys
+
+
+current_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(current_dir)
+
+
+import switch_virtualenv
+
+import lockfile
+
+from compass.actions import update_progress
+from compass.db.api import database
+from compass.tasks.client import celery
+from compass.utils import daemonize
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+flags.add_bool('async',
+               help='run in async mode',
+               default=True)
+flags.add('run_interval', type='int',
+          help='run interval in seconds',
+          default=setting.PROGRESS_UPDATE_INTERVAL)
+
+
+def progress_update():
+    """entry function.
+
+    async mode delegates the update to a celery worker; sync mode runs
+    it inline and logs (but does not raise) any failure so the daemon
+    loop keeps running.
+    """
+    if flags.OPTIONS.async:
+        celery.send_task('compass.tasks.update_progress', ())
+    else:
+        try:
+            update_progress.update_progress()
+        except Exception as error:
+            logging.error('failed to update progress')
+            logging.exception(error)
+
+
+if __name__ == '__main__':
+    flags.init()
+    logsetting.init()
+    database.init()
+    logging.info('run progress update')
+    # re-run progress_update every run_interval seconds as a daemon.
+    daemonize.daemonize(
+        progress_update,
+        flags.OPTIONS.run_interval,
+        pidfile=lockfile.FileLock('/var/run/progress_update.pid'),
+        stderr=open('/tmp/progress_update_err.log', 'w+'),
+        stdout=open('/tmp/progress_update_out.log', 'w+'))
diff --git a/compass-deck/bin/query_switch.py b/compass-deck/bin/query_switch.py
new file mode 100755
index 0000000..4b4b2cd
--- /dev/null
+++ b/compass-deck/bin/query_switch.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""query switch."""
+import optparse
+import Queue
+import threading
+import time
+
+from compass.apiclient.restful import Client
+
+
+class AddSwitch(object):
+    """A utility class.
+
+    Handles adding a switch and retrieving corresponding machines
+    associated with the switch.
+    """
+
+    def __init__(self, server_url):
+        print server_url, " ...."
+        self._client = Client(server_url)
+
+    def add_switch(self, queue, ip, snmp_community):
+        """Add a switch with SNMP credentials.
+
+        :param queue: The result holder for the machine details.
+        :type queue: A Queue object(thread-safe).
+        :param ip: The IP address of the switch.
+        :type ip: string.
+        :param snmp_community: The SNMP community string.
+        :type snmp_community: string.
+        """
+        status, resp = self._client.add_switch(ip,
+                                               version="2c",
+                                               community=snmp_community)
+        # NOTE(review): only statuses strictly above 409 are treated as
+        # hard failures; 409 (already exists) is handled below, but a
+        # 4xx below 409 would fall through as success -- confirm this is
+        # the intended contract of the apiclient.
+        if status > 409:
+            queue.put((ip, (False,
+                            "Failed to add the switch (status=%d)" % status)))
+            return
+
+        if status == 409:
+            # This is the case where the switch with the same IP already
+            # exists in the system. We now try to update the switch
+            # with the given credential.
+            switch_id = resp['failedSwitch']
+            status, resp = self._client.update_switch(switch_id,
+                                                      version="2c",
+                                                      community=snmp_community)
+            if status > 202:
+                queue.put((ip, (False,
+                                "Failed to update the switch (status=%d)" %
+                                status)))
+                return
+
+        switch = resp['switch']
+        state = switch['state']
+        switch_id = switch['id']
+
+        # if the switch state is not in under_monitoring,
+        # wait for the poll switch task
+        while True:
+            status, resp = self._client.get_switch(switch_id)
+            if status > 400:
+                queue.put((ip, (False, "Failed to get switch status")))
+                return
+
+            switch = resp['switch']
+
+            state = switch['state']
+            if state == 'initialized' or state == 'repolling':
+                # poll task still running; check again in 5 seconds.
+                # NOTE(review): no upper bound here -- a switch stuck in
+                # 'repolling' blocks this worker thread past the 60s
+                # join timeout in main; confirm acceptable.
+                time.sleep(5)
+            else:
+                break
+
+        if state == 'under_monitoring':
+            # get machines connected to the switch.
+            status, response = self._client.get_machines(switch_id=switch_id)
+            if status == 200:
+                # success results are plain strings; failure results are
+                # (False, message) tuples -- consumers print either.
+                for machine in response['machines']:
+                    queue.put((ip, "mac=%s, vlan=%s, port=%s dbid=%d" % (
+                        machine['mac'],
+                        machine['vlan'],
+                        machine['port'],
+                        machine['id'])))
+            else:
+                queue.put((ip, (False,
+                                "Failed to get machines %s" %
+                                response['status'])))
+        else:
+            queue.put((ip, (False, "Switch state is %s" % state)))
+
+if __name__ == "__main__":
+    usage = "usage: %prog [options] switch_ips"
+    parser = optparse.OptionParser(usage)
+
+    parser.add_option("-u", "--server-url", dest="server_url",
+                      default="http://localhost/api",
+                      help="The Compass Server URL")
+
+    parser.add_option("-c", "--community", dest="community",
+                      default="public",
+                      help="Switch SNMP community string")
+
+    (options, args) = parser.parse_args()
+
+    if len(args) != 1:
+        parser.error("Wrong number of arguments")
+
+    # one worker thread per switch ip; results are collected in a
+    # thread-safe queue and drained after the joins.
+    threads = []
+    queue = Queue.Queue()
+    add_switch = AddSwitch(options.server_url)
+
+    print "Add switch to the server. This may take a while ..."
+    for switch in args[0].split(','):
+        t = threading.Thread(target=add_switch.add_switch,
+                             args=(queue, switch, options.community))
+
+        threads.append(t)
+        t.start()
+
+    for t in threads:
+        # wait at most 60 seconds per worker thread.
+        t.join(60)
+
+    # drain whatever results arrived; block=False so a still-running
+    # (timed-out) thread does not hang the main thread.
+    while True:
+        try:
+            ip, result = queue.get(block=False)
+            print ip, " : ", result
+        except Queue.Empty:
+            break
diff --git a/compass-deck/bin/refresh.sh b/compass-deck/bin/refresh.sh
new file mode 100755
index 0000000..d867440
--- /dev/null
+++ b/compass-deck/bin/refresh.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+/opt/compass/bin/refresh_agent.sh
+/opt/compass/bin/refresh_server.sh
diff --git a/compass-deck/bin/refresh_agent.sh b/compass-deck/bin/refresh_agent.sh
new file mode 100755
index 0000000..13c3050
--- /dev/null
+++ b/compass-deck/bin/refresh_agent.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+# systemctl restart mysql.service
+# systemctl status mysql.service || exit $?
+# /opt/compass/bin/manage_db.py createdb
+/opt/compass/bin/clean_installers.py --noasync
+/opt/compass/bin/clean_installation_logs.py
+rm -rf /var/ansible/run/*
+# systemctl restart httpd.service
+# systemctl status httpd.service || exit $?
+systemctl restart rsyslog.service
+systemctl status rsyslog.service || exit $?
+systemctl restart redis.service
+systemctl status redis.service || exit $?
+redis-cli flushall
+systemctl restart cobblerd.service
+systemctl status cobblerd.service || exit $?
+systemctl restart compass-celeryd.service
+systemctl status compass-celeryd.service || exit $?
+# systemctl restart compass-progress-updated.service
+# systemctl status compass-progress-updated.service || exit $?
+
diff --git a/compass-deck/bin/refresh_server.sh b/compass-deck/bin/refresh_server.sh
new file mode 100755
index 0000000..a93204a
--- /dev/null
+++ b/compass-deck/bin/refresh_server.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+systemctl restart mysql.service
+systemctl status mysql.service || exit $?
+/opt/compass/bin/manage_db.py createdb
+# /opt/compass/bin/clean_installers.py --noasync
+# /opt/compass/bin/clean_installation_logs.py
+# rm -rf /var/ansible/run/*
+systemctl restart httpd.service
+systemctl status httpd.service || exit $?
+systemctl restart rsyslog.service
+systemctl status rsyslog.service || exit $?
+systemctl restart redis.service
+systemctl status redis.service || exit $?
+redis-cli flushall
+# systemctl restart cobblerd.service
+# systemctl status cobblerd.service || exit $?
+# systemctl restart compass-celeryd.service
+# systemctl status compass-celeryd.service || exit $?
+# systemctl restart compass-progress-updated.service
+# systemctl status compass-progress-updated.service || exit $?
+
diff --git a/compass-deck/bin/runserver.py b/compass-deck/bin/runserver.py
new file mode 100755
index 0000000..b8b1a72
--- /dev/null
+++ b/compass-deck/bin/runserver.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""main script to start an instance of compass server ."""
+import logging
+
+from compass.api import app
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('server_host',
+          help='server host address',
+          default='0.0.0.0')
+# NOTE(review): debug defaults to True while the server binds to
+# 0.0.0.0; flask debug mode exposes an interactive debugger, so this
+# default should not reach production -- confirm deployment overrides it.
+flags.add_bool('debug',
+               help='run in debug mode',
+               default=True)
+
+
+if __name__ == '__main__':
+    flags.init()
+    logsetting.init()
+    logging.info('run server')
+    app.run(host=flags.OPTIONS.server_host, debug=flags.OPTIONS.debug)
diff --git a/compass-deck/bin/switch_virtualenv.py b/compass-deck/bin/switch_virtualenv.py
new file mode 100755
index 0000000..ca843eb
--- /dev/null
+++ b/compass-deck/bin/switch_virtualenv.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""utility switch to virtual env."""
+import os
+import os.path
+import site
+import sys
+
+
+# activate the compass-core virtualenv inside the importing script's
+# interpreter (this module is imported for its side effects only).
+virtual_env = '/root/.virtualenvs/compass-core'
+activate_this = '%s/bin/activate_this.py' % virtual_env
+# execfile is python2-only; activate_this.py rewrites sys.path in place.
+execfile(activate_this, dict(__file__=activate_this))
+# NOTE(review): python2.6 site-packages path is hard-coded -- this
+# breaks if the virtualenv was built with a different interpreter
+# version; confirm against the deployed image.
+site.addsitedir('%s/lib/python2.6/site-packages' % virtual_env)
+if virtual_env not in sys.path:
+    sys.path.append(virtual_env)
+os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'