aboutsummaryrefslogtreecommitdiffstats
path: root/deploy
diff options
context:
space:
mode:
Diffstat (limited to 'deploy')
-rw-r--r--deploy/client.py877
-rw-r--r--deploy/compass_vm.sh4
-rw-r--r--deploy/conf/baremetal.conf18
-rw-r--r--deploy/conf/baremetal_cluster.yml37
-rw-r--r--deploy/conf/baremetal_five.yml45
-rw-r--r--deploy/conf/base.conf17
-rw-r--r--deploy/conf/cluster.conf17
-rw-r--r--deploy/conf/five.conf17
-rw-r--r--deploy/conf/network_cfg.yaml53
-rw-r--r--deploy/conf/neutron_cfg.yaml6
-rw-r--r--deploy/conf/virtual.conf9
-rw-r--r--deploy/conf/virtual_cluster.yml26
-rw-r--r--deploy/conf/virtual_five.yml29
-rw-r--r--deploy/config_parse.py86
-rw-r--r--deploy/deploy_host.sh23
-rwxr-xr-xdeploy/deploy_parameter.sh91
-rw-r--r--deploy/host_baremetal.sh13
-rw-r--r--deploy/host_virtual.sh61
-rwxr-xr-xdeploy/launch.sh65
-rw-r--r--deploy/log.py36
-rwxr-xr-xdeploy/network.sh80
-rwxr-xr-xdeploy/prepare.sh32
-rw-r--r--deploy/restful.py1110
-rw-r--r--deploy/template/power/ipmitool.tmpl25
-rw-r--r--deploy/template/power/smmset.tmpl22
25 files changed, 2684 insertions, 115 deletions
diff --git a/deploy/client.py b/deploy/client.py
new file mode 100644
index 00000000..48602c42
--- /dev/null
+++ b/deploy/client.py
@@ -0,0 +1,877 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""binary to deploy a cluster by compass client api."""
+import os
+import re
+import socket
+import sys
+import time
+import yaml
+import netaddr
+import requests
+import json
+import itertools
+from collections import defaultdict
+from restful import Client
+
+ROLE_UNASSIGNED = True
+ROLE_ASSIGNED = False
+
+import log as logging
+LOG = logging.getLogger(__name__)
+
+from oslo_config import cfg
+CONF = cfg.CONF
+
+def byteify(input):
+ if isinstance(input, dict):
+ return dict([(byteify(key),byteify(value)) for key,value in input.iteritems()])
+ elif isinstance(input, list):
+ return [byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
+opts = [
+ cfg.StrOpt('compass_server',
+ help='compass server url',
+ default='http://127.0.0.1/api'),
+ cfg.StrOpt('compass_user_email',
+ help='compass user email',
+ default='admin@huawei.com'),
+ cfg.StrOpt('compass_user_password',
+ help='compass user password',
+ default='admin'),
+ cfg.StrOpt('switch_ips',
+               help='comma separated switch ips',
+ default=''),
+ cfg.StrOpt('switch_credential',
+ help='comma separated <credential key>=<credential value>',
+ default='version=2c,community=public'),
+ cfg.IntOpt('switch_max_retries',
+ help='max retries of poll switch',
+ default=10),
+ cfg.IntOpt('switch_retry_interval',
+ help='interval to repoll switch',
+ default=10),
+ cfg.BoolOpt('poll_switches',
+ help='if the client polls switches',
+ default=True),
+ cfg.StrOpt('machines',
+ help='comma separated mac addresses of machines',
+ default=''),
+ cfg.StrOpt('subnets',
+               help='comma separated subnets',
+ default=''),
+ cfg.StrOpt('adapter_name',
+ help='adapter name',
+ default=''),
+ cfg.StrOpt('adapter_os_pattern',
+ help='adapter os name',
+ default=r'^(?i)centos.*'),
+ cfg.StrOpt('adapter_target_system_pattern',
+ help='adapter target system name',
+ default='^openstack$'),
+ cfg.StrOpt('adapter_flavor_pattern',
+ help='adapter flavor name',
+ default='allinone'),
+ cfg.StrOpt('cluster_name',
+ help='cluster name',
+ default='cluster1'),
+ cfg.StrOpt('language',
+ help='language',
+ default='EN'),
+ cfg.StrOpt('timezone',
+ help='timezone',
+ default='GMT'),
+ cfg.StrOpt('http_proxy',
+ help='http proxy',
+ default=''),
+ cfg.StrOpt('https_proxy',
+ help='https proxy',
+ default=''),
+ cfg.StrOpt('no_proxy',
+ help='no proxy',
+ default=''),
+ cfg.StrOpt('ntp_server',
+ help='ntp server',
+ default=''),
+ cfg.StrOpt('dns_servers',
+ help='dns servers',
+ default=''),
+ cfg.StrOpt('domain',
+ help='domain',
+ default=''),
+ cfg.StrOpt('search_path',
+ help='search path',
+ default=''),
+ cfg.StrOpt('local_repo_url',
+ help='local repo url',
+ default=''),
+ cfg.StrOpt('default_gateway',
+ help='default gateway',
+ default=''),
+ cfg.StrOpt('server_credential',
+ help=(
+ 'server credential formatted as '
+ '<username>=<password>'
+ ),
+ default='root=root'),
+ cfg.StrOpt('os_config_json_file',
+ help='json formatted os config file',
+ default=''),
+ cfg.StrOpt('service_credentials',
+ help=(
+                   'comma separated service credentials formatted as '
+ '<servicename>:<username>=<password>,...'
+ ),
+ default=''),
+ cfg.StrOpt('console_credentials',
+ help=(
+                   'comma separated console credential formatted as '
+ '<consolename>:<username>=<password>'
+ ),
+ default=''),
+ cfg.StrOpt('hostnames',
+               help='comma separated hostname',
+ default=''),
+ cfg.StrOpt('host_networks',
+ help=(
+                   'semicolon separated host name and its networks '
+ '<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscuous>,...'
+ ),
+ default=''),
+ cfg.StrOpt('partitions',
+ help=(
+                   'comma separated partitions '
+ '<partition name>=<partition_value>'
+ ),
+ default='tmp:percentage=10%,var:percentage=30%,home:percentage=30%'),
+ cfg.StrOpt('network_mapping',
+ help=(
+                   'comma separated network mapping '
+ '<network_type>=<interface_name>'
+ ),
+ default=''),
+ cfg.StrOpt('package_config_json_file',
+ help='json formatted os config file',
+ default=''),
+ cfg.StrOpt('host_roles',
+ help=(
+                   'semicolon separated host roles '
+ '<hostname>=<comma separated roles>'
+ ),
+ default=''),
+ cfg.StrOpt('default_roles',
+ help=(
+                   'comma separated default roles '
+ '<rolename>'
+ ),
+ default=''),
+ cfg.IntOpt('action_timeout',
+ help='action timeout in seconds',
+ default=60),
+ cfg.IntOpt('deployment_timeout',
+ help='deployment timeout in minutes',
+ default=60),
+ cfg.IntOpt('progress_update_check_interval',
+ help='progress update status check interval in seconds',
+ default=60),
+ cfg.StrOpt('dashboard_url',
+ help='dashboard url',
+ default=''),
+ cfg.StrOpt('dashboard_link_pattern',
+ help='dashboard link pattern',
+ default=r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)'),
+ cfg.StrOpt('cluster_vip',
+ help='cluster ip address',
+ default=''),
+ cfg.StrOpt('network_cfg',
+               help='network config file',
+ default=''),
+ cfg.StrOpt('neutron_cfg',
+               help='network config file',
+ default=''),
+ cfg.StrOpt('cluster_pub_vip',
+ help='cluster ip address',
+ default=''),
+ cfg.StrOpt('cluster_prv_vip',
+ help='cluster ip address',
+ default=''),
+]
+CONF.register_cli_opts(opts)
+
+def is_role_unassigned(role):
+ return role
+
+def _load_config(config_filename):
+ if not config_filename:
+ return {}
+ with open(config_filename) as config_file:
+ content = config_file.read()
+ return json.loads(content)
+
+
+class CompassClient(object):
+ def __init__(self):
+ LOG.info("xh: compass_server=%s" % CONF.compass_server)
+ self.client = Client(CONF.compass_server)
+ self.subnet_mapping = {}
+ self.role_mapping = {}
+ self.host_mapping = {}
+ self.host_ips = defaultdict(list)
+ self.host_roles = {}
+
+ self.login()
+
+ def is_ok(self, status):
+ if status < 300 and status >= 200:
+ return True
+
+ def login(self):
+ status, resp = self.client.get_token(
+ CONF.compass_user_email,
+ CONF.compass_user_password
+ )
+
+ LOG.info(
+ 'login status: %s, resp: %s',
+ status, resp
+ )
+ if self.is_ok(status):
+ return resp["token"]
+ else:
+ raise Exception(
+ 'failed to login %s with user %s',
+ CONF.compass_server,
+ CONF.compass_user_email
+ )
+
+ def get_machines(self):
+ status, resp = self.client.list_machines()
+ LOG.info(
+ 'get all machines status: %s, resp: %s', status, resp)
+ if not self.is_ok(status):
+ raise RuntimeError('failed to get machines')
+
+ machines_to_add = list(set([
+ machine for machine in CONF.machines.split(',')
+ if machine
+ ]))
+
+ LOG.info('machines to add: %s', machines_to_add)
+ machines_db = [str(m["mac"]) for m in resp]
+ LOG.info('machines in db: %s', machines_db)
+ assert(set(machines_db) == set(machines_to_add))
+
+ return [m["id"] for m in resp]
+
+ def get_adapter(self):
+ """get adapter."""
+ status, resp = self.client.list_adapters(name=CONF.adapter_name)
+ LOG.info(
+ 'get all adapters status: %s, resp: %s',
+ status, resp
+ )
+
+ if not self.is_ok(status) or not resp:
+ raise RuntimeError('failed to get adapters')
+
+ adapter_name = CONF.adapter_name
+ os_re = re.compile(CONF.adapter_os_pattern)
+ flavor_re = re.compile(CONF.adapter_flavor_pattern)
+
+ adapter_id = None
+ os_id = None
+ distributed_system_id = None
+ flavor_id = None
+ adapter = None
+
+ adapter = resp[0]
+ adapter_id = adapter['id']
+ distributed_system_id = adapter['distributed_system_id']
+ for supported_os in adapter['supported_oses']:
+ if not os_re or os_re.match(supported_os['name']):
+ os_id = supported_os['os_id']
+ break
+
+ if 'flavors' in adapter:
+ for flavor in adapter['flavors']:
+ if not flavor_re or flavor_re.match(flavor['name']):
+ flavor_id = flavor['id']
+ break
+
+ assert(os_id and flavor_id)
+ return (adapter_id, os_id, distributed_system_id, flavor_id)
+
+ def add_subnets(self):
+ subnets = [
+ subnet for subnet in CONF.subnets.split(',')
+ if subnet
+ ]
+
+ assert(subnets)
+
+ subnet_mapping = {}
+ for subnet in subnets:
+ try:
+ netaddr.IPNetwork(subnet)
+ except:
+ raise RuntimeError('subnet %s format is invalid' % subnet)
+
+ status, resp = self.client.add_subnet(subnet)
+ LOG.info('add subnet %s status %s response %s',
+ subnet, status, resp)
+ if not self.is_ok(status):
+ raise RuntimeError('failed to add subnet %s' % subnet)
+
+ subnet_mapping[resp['subnet']] = resp['id']
+
+ self.subnet_mapping = subnet_mapping
+
+ def add_cluster(self, adapter_id, os_id, flavor_id):
+ """add a cluster."""
+ cluster_name = CONF.cluster_name
+ assert(cluster_name)
+ status, resp = self.client.add_cluster(
+ cluster_name, adapter_id,
+ os_id, flavor_id)
+
+ if not self.is_ok(status):
+ raise RuntimeError("add cluster failed")
+
+ LOG.info('add cluster %s status: %s resp:%s',
+ cluster_name, status,resp)
+
+ if isinstance(resp, list):
+ cluster = resp[0]
+ else:
+ cluster = resp
+
+ cluster_id = cluster['id']
+ flavor = cluster.get('flavor', {})
+ roles = flavor.get('roles', [])
+
+ for role in roles:
+ if role.get('optional', False):
+ self.role_mapping[role['name']] = ROLE_ASSIGNED
+ else:
+ self.role_mapping[role['name']] = ROLE_UNASSIGNED
+
+ return cluster_id
+
+ def add_cluster_hosts(self, cluster_id, machines):
+ hostnames = [
+ hostname for hostname in CONF.hostnames.split(',')
+ if hostname
+ ]
+
+ assert(len(machines) == len(hostnames))
+
+ machines_dict = []
+ for machine_id, hostname in zip(machines, hostnames):
+ machines_dict.append({
+ 'machine_id': machine_id,
+ 'name': hostname
+ })
+
+ # add hosts to the cluster.
+ status, resp = self.client.add_hosts_to_cluster(
+ cluster_id,
+ {'machines': machines_dict})
+
+ LOG.info('add machines %s to cluster %s status: %s, resp: %s',
+ machines_dict, cluster_id, status, resp)
+
+ if not self.is_ok(status):
+ raise RuntimeError("add host to cluster failed")
+
+ for host in resp['hosts']:
+ self.host_mapping[host['hostname']] = host['id']
+
+ assert(len(self.host_mapping) == len(machines))
+
+ def set_cluster_os_config(self, cluster_id):
+ """set cluster os config."""
+ os_config = {}
+ language = CONF.language
+ timezone = CONF.timezone
+ http_proxy = CONF.http_proxy
+ https_proxy = CONF.https_proxy
+ local_repo_url = CONF.local_repo_url
+ if not https_proxy and http_proxy:
+ https_proxy = http_proxy
+
+ no_proxy = [
+ no_proxy for no_proxy in CONF.no_proxy.split(',')
+ if no_proxy
+ ]
+
+ compass_server = CONF.compass_server
+ if http_proxy:
+ for hostname, ips in self.host_ips.items():
+ no_proxy.append(hostname)
+ no_proxy.extend(ips)
+
+ ntp_server = CONF.ntp_server or compass_server
+
+ dns_servers = [
+ dns_server for dns_server in CONF.dns_servers.split(',')
+ if dns_server
+ ]
+ if not dns_servers:
+ dns_servers = [compass_server]
+
+ domain = CONF.domain
+ if not domain:
+ raise Exception('domain is not defined')
+
+ search_path = [
+ search_path for search_path in CONF.search_path.split(',')
+ if search_path
+ ]
+
+ if not search_path:
+ search_path = [domain]
+
+ default_gateway = CONF.default_gateway
+ if not default_gateway:
+ raise Exception('default gateway is not defined')
+
+
+ general_config = {
+ 'language': language,
+ 'timezone': timezone,
+ 'ntp_server': ntp_server,
+ 'dns_servers': dns_servers,
+ 'default_gateway': default_gateway
+ }
+
+ if http_proxy:
+ general_config['http_proxy'] = http_proxy
+ if https_proxy:
+ general_config['https_proxy'] = https_proxy
+ if no_proxy:
+ general_config['no_proxy'] = no_proxy
+ if domain:
+ general_config['domain'] = domain
+ if search_path:
+ general_config['search_path'] = search_path
+ if local_repo_url:
+ general_config['local_repo'] = local_repo_url
+
+ os_config["general"] = general_config
+
+ server_credential = CONF.server_credential
+ if '=' in server_credential:
+ server_username, server_password = server_credential.split('=', 1)
+ elif server_credential:
+ server_username = server_password = server_credential
+ else:
+ server_username = 'root'
+ server_password = 'root'
+
+ os_config['server_credentials'] = {
+ 'username': server_username,
+ 'password': server_password
+ }
+
+ partitions = [
+ partition for partition in CONF.partitions.split(',')
+ if partition
+ ]
+
+ partition_config = {}
+ for partition in partitions:
+ assert("=" in partition)
+
+ partition_name, partition_value = partition.split('=', 1)
+ partition_name = partition_name.strip()
+ partition_value = partition_value.strip()
+
+ assert(partition_name and partition_value)
+
+ if partition_value.endswith('%'):
+ partition_type = 'percentage'
+ partition_value = int(partition_value[:-1])
+ else:
+ partition_type = 'size'
+
+ partition_config[partition_name] = {
+ partition_type: partition_value
+ }
+
+ os_config['partition'] = partition_config
+
+ """
+ os_config_filename = CONF.os_config_json_file
+ if os_config_filename:
+ util.merge_dict(
+ os_config, _load_config(os_config_filename)
+ )
+ """
+
+ status, resp = self.client.update_cluster_config(
+ cluster_id, os_config=os_config)
+ LOG.info(
+ 'set os config %s to cluster %s status: %s, resp: %s',
+ os_config, cluster_id, status, resp)
+ if not self.is_ok(status):
+ raise RuntimeError('failed to set os config %s to cluster %s' \
+ % (os_config, cluster_id))
+
+ def set_host_networking(self):
+ """set cluster hosts networking."""
+ def get_subnet(ip_str):
+ try:
+ LOG.info("subnets: %s" % self.subnet_mapping.keys())
+ ip = netaddr.IPAddress(ip_str)
+ for cidr, subnet_id in self.subnet_mapping.items():
+ subnet = netaddr.IPNetwork(cidr)
+ if ip in subnet:
+ return True, subnet_id
+
+ LOG.info("ip %s not in %s" % (ip_str, cidr))
+ return False, None
+ except:
+ LOG.exception("ip addr %s is invalid" % ip_str)
+ return False, None
+
+ for host_network in CONF.host_networks.split(';'):
+ hostname, networks_str = host_network.split(':', 1)
+ hostname = hostname.strip()
+ networks_str = networks_str.strip()
+
+ assert(hostname in self.host_mapping)
+
+ host_id = self.host_mapping[hostname]
+ intf_list = networks_str.split(',')
+ for intf_str in intf_list:
+ interface, intf_properties = intf_str.split('=', 1)
+ intf_properties = intf_properties.strip().split('|')
+
+ assert(intf_properties)
+ ip_str = intf_properties[0]
+
+ status, subnet_id = get_subnet(ip_str)
+ if not status:
+ raise RuntimeError("ip addr %s is invalid" % ip_str)
+
+ properties = dict([
+ (intf_property, True)
+ for intf_property in intf_properties[1:]
+ ])
+
+ LOG.info(
+                    'add host %s interface %s ip %s network properties %s',
+ hostname, interface, ip_str, properties)
+
+ status, response = self.client.add_host_network(
+ host_id, interface, ip=ip_str, subnet_id=subnet_id,
+ **properties
+ )
+
+ LOG.info(
+ 'add host %s interface %s ip %s network properties %s '
+ 'status %s: %s',
+ hostname, interface, ip_str, properties,
+ status, response
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("add host network failed")
+
+ self.host_ips[hostname].append(ip_str)
+
+ def set_cluster_package_config(self, cluster_id):
+ """set cluster package config."""
+ package_config = {"security": {}}
+
+ service_credentials = [
+ service_credential
+ for service_credential in CONF.service_credentials.split(',')
+ if service_credential
+ ]
+
+ service_credential_cfg = {}
+ LOG.info(
+ 'service credentials: %s', service_credentials
+ )
+
+ for service_credential in service_credentials:
+ if ':' not in service_credential:
+ raise Exception(
+ 'there is no : in service credential %s' % service_credential
+ )
+ service_name, service_pair = service_credential.split(':', 1)
+ if '=' not in service_pair:
+ raise Exception(
+ 'there is no = in service %s security' % service_name
+ )
+
+ username, password = service_pair.split('=', 1)
+ service_credential_cfg[service_name] = {
+ 'username': username,
+ 'password': password
+ }
+
+ console_credentials = [
+ console_credential
+ for console_credential in CONF.console_credentials.split(',')
+ if console_credential
+ ]
+
+ LOG.info(
+ 'console credentials: %s', console_credentials
+ )
+
+ console_credential_cfg = {}
+ for console_credential in console_credentials:
+ if ':' not in console_credential:
+ raise Exception(
+ 'there is no : in console credential %s' % console_credential
+ )
+ console_name, console_pair = console_credential.split(':', 1)
+ if '=' not in console_pair:
+ raise Exception(
+ 'there is no = in console %s security' % console_name
+ )
+ username, password = console_pair.split('=', 1)
+ console_credential_cfg[console_name] = {
+ 'username': username,
+ 'password': password
+ }
+
+ package_config["security"] = {"service_credentials": service_credential_cfg,
+ "console_credentials": console_credential_cfg}
+
+ network_mapping = dict([
+ network_pair.split('=', 1)
+ for network_pair in CONF.network_mapping.split(',')
+ if '=' in network_pair
+ ])
+
+ package_config['network_mapping'] = network_mapping
+
+ assert(os.path.exists(CONF.network_cfg))
+ network_cfg = yaml.load(open(CONF.network_cfg))
+ package_config["network_cfg"] = network_cfg
+
+ assert(os.path.exists(CONF.neutron_cfg))
+ neutron_cfg = yaml.load(open(CONF.neutron_cfg))
+ package_config["neutron_config"] = neutron_cfg
+
+ """
+ package_config_filename = CONF.package_config_json_file
+ if package_config_filename:
+ util.merge_dict(
+ package_config, _load_config(package_config_filename)
+ )
+ """
+ package_config['ha_proxy'] = {}
+
+        # TODO: we need two VIPs
+ if CONF.cluster_pub_vip:
+ package_config["ha_proxy"]["pub_vip"] = CONF.cluster_pub_vip
+
+ if CONF.cluster_prv_vip:
+ package_config["ha_proxy"]["prv_vip"] = CONF.cluster_prv_vip
+
+ if CONF.cluster_vip:
+ package_config["ha_proxy"]["vip"] = CONF.cluster_vip
+
+ status, resp = self.client.update_cluster_config(
+ cluster_id, package_config=package_config)
+ LOG.info(
+ 'set package config %s to cluster %s status: %s, resp: %s',
+ package_config, cluster_id, status, resp)
+
+ if not self.is_ok(status):
+ raise RuntimeError("set cluster package_config failed")
+
+ def set_host_roles(self, cluster_id, host_id, roles):
+ status, response = self.client.update_cluster_host(
+ cluster_id, host_id, roles=roles)
+
+ LOG.info(
+ 'set cluster %s host %s roles %s status %s: %s',
+ cluster_id, host_id, roles, status, response
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("set host roles failed")
+
+ for role in roles:
+ if role in self.role_mapping:
+ self.role_mapping[role] = ROLE_ASSIGNED
+
+ def set_all_hosts_roles(self, cluster_id):
+ for host_str in CONF.host_roles.split(';'):
+ host_str = host_str.strip()
+ hostname, roles_str = host_str.split('=', 1)
+
+ assert(hostname in self.host_mapping)
+ host_id = self.host_mapping[hostname]
+
+ roles = [role.strip() for role in roles_str.split(',') if role]
+
+ self.set_host_roles(cluster_id, host_id, roles)
+ self.host_roles[hostname] = roles
+
+ unassigned_hostnames = list(set(self.host_mapping.keys()) \
+ - set(self.host_roles.keys()))
+
+ unassigned_roles = [ role for role, status in self.role_mapping.items()
+ if is_role_unassigned(status)]
+
+ assert(len(unassigned_hostnames) >= len(unassigned_roles))
+
+ for hostname, role in map(None, unassigned_hostnames, unassigned_roles):
+ host_id = self.host_mapping[hostname]
+ self.set_host_roles(cluster_id, host_id, [role])
+ self.host_roles[hostname] = [role]
+
+ unassigned_hostnames = list(set(self.host_mapping.keys()) \
+ - set(self.host_roles.keys()))
+
+ if not unassigned_hostnames:
+ return
+
+ # assign default roles to unassigned hosts
+ default_roles = [
+ role for role in CONF.default_roles.split(',')
+ if role
+ ]
+
+ assert(default_roles)
+
+ cycle_roles = itertools.cycle(default_roles)
+ for hostname in unassigned_hostnames:
+ host_id = self.host_mapping[hostname]
+ roles = [cycle_roles.next()]
+ self.set_host_roles(cluster_id, host_id, roles)
+ self.host_roles[hostname] = roles
+
+ def deploy_clusters(self, cluster_id):
+ host_ids = self.host_mapping.values()
+
+ status, response = self.client.review_cluster(
+ cluster_id, review={'hosts': host_ids}
+ )
+ LOG.info(
+ 'review cluster %s hosts %s, status %s: %s',
+ cluster_id, host_ids, status, response
+ )
+
+        # TODO: what is this doing?
+ if not self.is_ok(status):
+ raise RuntimeError("review cluster host failed")
+
+ status, response = self.client.deploy_cluster(
+ cluster_id, deploy={'hosts': host_ids}
+ )
+ LOG.info(
+ 'deploy cluster %s hosts %s status %s: %s',
+ cluster_id, host_ids, status, response
+ )
+
+ if not self.is_ok(status):
+ raise RuntimeError("deploy cluster failed")
+
+ def get_installing_progress(self, cluster_id):
+        """get installing progress."""
+ action_timeout = time.time() + 60 * float(CONF.action_timeout)
+ deployment_timeout = time.time() + 60 * float(
+ CONF.deployment_timeout)
+
+ current_time = time.time()
+ deployment_failed = True
+ while current_time < deployment_timeout:
+ status, cluster_state = self.client.get_cluster_state(cluster_id)
+ LOG.info(
+ 'get cluster %s state status %s: %s',
+ cluster_id, status, cluster_state
+ )
+ if not self.is_ok(status):
+ raise RuntimeError("can not get cluster state")
+
+ if cluster_state['state'] in ['UNINITIALIZED', 'INITIALIZED']:
+ if current_time >= action_timeout:
+ deployment_failed = True
+ break
+ else:
+ continue
+
+ elif cluster_state['state'] == 'SUCCESSFUL':
+ deployment_failed = False
+ break
+ elif cluster_state['state'] == 'ERROR':
+ deployment_failed = True
+ break
+
+ if deployment_failed:
+ raise RuntimeError("deploy cluster failed")
+
+ def check_dashboard_links(self, cluster_id):
+ dashboard_url = CONF.dashboard_url
+ if not dashboard_url:
+            LOG.info('no dashboard url set')
+ return
+ dashboard_link_pattern = re.compile(
+ CONF.dashboard_link_pattern)
+ r = requests.get(dashboard_url, verify=False)
+ r.raise_for_status()
+ match = dashboard_link_pattern.search(r.text)
+ if match:
+ LOG.info(
+ 'dashboard login page for cluster %s can be downloaded',
+ cluster_id)
+ else:
+ msg = (
+ '%s failed to be downloaded\n'
+ 'the context is:\n%s\n'
+ ) % (dashboard_url, r.text)
+ raise Exception(msg)
+
+
+def main():
+ client = CompassClient()
+ machines = client.get_machines()
+
+ LOG.info('machines are %s', machines)
+
+ client.add_subnets()
+ adapter_id, os_id, distributed_system_id, flavor_id = client.get_adapter()
+ cluster_id = client.add_cluster(adapter_id, os_id, flavor_id)
+
+ client.add_cluster_hosts(cluster_id, machines)
+ client.set_host_networking()
+ client.set_cluster_os_config(cluster_id)
+
+ if distributed_system_id:
+ client.set_cluster_package_config(cluster_id)
+
+ client.set_all_hosts_roles(cluster_id)
+ client.deploy_clusters(cluster_id)
+
+ client.get_installing_progress(cluster_id)
+ client.check_dashboard_links(cluster_id)
+
+if __name__ == "__main__":
+ CONF(args=sys.argv[1:])
+ main()
diff --git a/deploy/compass_vm.sh b/deploy/compass_vm.sh
index 34b2b271..4c40c1e4 100644
--- a/deploy/compass_vm.sh
+++ b/deploy/compass_vm.sh
@@ -9,6 +9,7 @@ function tear_down_compass() {
sudo umount $compass_vm_dir/new > /dev/null 2>&1
sudo rm -rf $compass_vm_dir
+
log_info "tear_down_compass success!!!"
}
@@ -17,7 +18,7 @@ function install_compass_core() {
log_info "install_compass_core enter"
sed -i "s/mgmt_next_ip:.*/mgmt_next_ip: ${COMPASS_SERVER}/g" $WORK_DIR/installer/compass-install/install/group_vars/all
echo "compass_nodocker ansible_ssh_host=$MGMT_IP ansible_ssh_port=22" > $inventory_file
- PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' python `which ansible-playbook` -e pipeline=true --private-key=$rsa_file --user=root --connection=ssh --inventory-file=$inventory_file $WORK_DIR/installer/compass-install/install/compass_nodocker.yml
+ PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' python /usr/local/bin/ansible-playbook -e pipeline=true --private-key=$rsa_file --user=root --connection=ssh --inventory-file=$inventory_file $WORK_DIR/installer/compass-install/install/compass_nodocker.yml
exit_status=$?
rm $inventory_file
log_info "install_compass_core exit"
@@ -70,6 +71,7 @@ function launch_compass() {
sudo mkisofs -quiet -r -J -R -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -hide-rr-moved -x "lost+found:" -o $new_iso $new_mnt
rm -rf $old_mnt $new_mnt
+
qemu-img create -f qcow2 $compass_vm_dir/disk.img 100G
# create vm xml
diff --git a/deploy/conf/baremetal.conf b/deploy/conf/baremetal.conf
index a97f2b99..5d42b36d 100644
--- a/deploy/conf/baremetal.conf
+++ b/deploy/conf/baremetal.conf
@@ -1,20 +1,4 @@
-export VIRT_CPUS=4
-export HOST_MACS="'64:3e:8c:4c:6d:a3' '64:3e:8c:4c:6d:37' '64:3e:8c:4c:6c:d7' '64:3e:8c:4c:6b:7b' '64:3e:8c:4c:68:2b'"
-export VIRT_MEM=16384
-export VIRT_DISK=30G
-export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
-#export 'ADAPTER_OS_PATTERN=(?i)centos-7\.1.*'
-export ADAPTER_NAME="openstack_juno"
-export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
-export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes"
-export HOSTNAMES="host1,host2,host3,host4,host5"
-export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute"
-export DEFAULT_ROLES=""
export SWITCH_IPS="192.168.124.2"
export SWITCH_CREDENTIAL="version=2c,community=public"
-export DEPLOYMENT_TIMEOUT="90"
+export DEPLOYMENT_TIMEOUT="150"
export POLL_SWITCHES_FLAG="nopoll_switches"
-export DASHBOARD_URL=""
-export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-source ${REGTEST_DIR}/base.conf
-export VIP="10.1.0.222"
diff --git a/deploy/conf/baremetal_cluster.yml b/deploy/conf/baremetal_cluster.yml
new file mode 100644
index 00000000..798099a4
--- /dev/null
+++ b/deploy/conf/baremetal_cluster.yml
@@ -0,0 +1,37 @@
+TYPE: baremetal
+FLAVOR: cluster
+POWER_TOOL: smmset
+
+hosts:
+ - name: host1
+ mac: '64:3e:8c:4c:6d:a3'
+ location: 14
+ roles:
+ - controller
+ - ha
+
+ - name: host2
+ mac: '64:3e:8c:4c:6d:37'
+ location: 15
+ roles:
+ - controller
+ - ha
+
+ - name: host3
+ mac: '64:3e:8c:4c:6c:d7'
+ location: 16
+ roles:
+ - controller
+ - ha
+
+ - name: host4
+ mac: '64:3e:8c:4c:6b:7b'
+ location: 17
+ roles:
+ - compute
+
+ - name: host5
+ location: 18
+ mac: '64:3e:8c:4c:68:2b'
+ roles:
+ - compute
diff --git a/deploy/conf/baremetal_five.yml b/deploy/conf/baremetal_five.yml
new file mode 100644
index 00000000..6e46e3b2
--- /dev/null
+++ b/deploy/conf/baremetal_five.yml
@@ -0,0 +1,45 @@
+
+TYPE: baremetal
+FLAVOR: five
+POWER_TOOL: ipmitool
+
+ipmiUser: root
+ipmiPass: Huawei@123
+
+hosts:
+ - name: host1
+ mac: '11:22:33:44:55'
+ ipmiUser: root
+ ipmiPass: Huawei@123
+ ipmiIp: 192.168.2.100
+ roles:
+ - controller
+ - network
+
+ - name: host2
+ mac: '11:22:33:44:55'
+ ipmiIp: 192.168.2.100
+ roles:
+ - compute
+ - storage
+
+ - name: host3
+ mac: '11:22:33:44:55'
+ ipmiIp: 192.168.2.100
+ roles:
+ - compute
+ - storage
+
+ - name: host4
+ mac: '11:22:33:44:55'
+ ipmiIp: 192.168.2.100
+ roles:
+ - compute
+ - storage
+
+ - name: host5
+ mac: '11:22:33:44:55'
+ ipmiIp: 192.168.2.100
+ roles:
+ - compute
+ - storage
diff --git a/deploy/conf/base.conf b/deploy/conf/base.conf
index be346ddf..4455cc43 100644
--- a/deploy/conf/base.conf
+++ b/deploy/conf/base.conf
@@ -1,4 +1,4 @@
-export ISO_URL=http://192.168.127.11:9999/xh/work/build/work/compass.iso
+export ISO_URL=${ISO_URL:-http://58.251.166.184:9999/compass.iso}
export INSTALL_IP=${INSTALL_IP:-10.1.0.12}
export INSTALL_MASK=${INSTALL_MASK:-255.255.255.0}
export INSTALL_GW=${INSTALL_GW:-10.1.0.1}
@@ -26,15 +26,10 @@ export NAMESERVERS="$COMPASS_SERVER"
export DOMAIN="ods.com"
export PARTITIONS="/=70%,/home=5%,/tmp=5%,/var=20%"
export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
+
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-'10.1.0.50'}
-export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.50'}
-export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.50'}
-export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.50'}
export MANAGEMENT_INTERFACE=${MANAGEMENT_INTERFACE:-eth0}
-export TENANT_INTERFACE=${TENANT_INTERFACE:-eth1}
-export STORAGE_INTERFACE=${STORAGE_INTERFACE:-eth3}
-export PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth2}
-
+export DASHBOARD_URL=""
function next_ip {
ip_addr=$1
@@ -52,9 +47,9 @@ if [ -z "$HOST_NETWORKS" ]; then
STORAGE_IP=${STORAGE_IP_START}
for HOSTNAME in ${HOSTNAME_LIST[@]}; do
if [ -z "$HOST_NETWORKS" ]; then
- HOST_NETWORKS="${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt,${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP}|is_promiscuous,${STORAGE_INTERFACE}=${STORAGE_IP}"
+ HOST_NETWORKS="${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt"
else
- HOST_NETWORKS="${HOST_NETWORKS};${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt,${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP}|is_promiscuous,${STORAGE_INTERFACE}=${STORAGE_IP}"
+ HOST_NETWORKS="${HOST_NETWORKS};${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt"
fi
MANAGE_IP=$(next_ip ${MANAGE_IP})
TENANT_IP=$(next_ip ${TENANT_IP})
@@ -64,7 +59,7 @@ if [ -z "$HOST_NETWORKS" ]; then
export HOST_NETWORKS
fi
-export NETWORK_MAPPING=${NETWORK_MAPPING:-"management=${MANAGEMENT_INTERFACE},tenant=${TENANT_INTERFACE},storage=${STORAGE_INTERFACE},external=${PUBLIC_INTERFACE}"}
+export NETWORK_MAPPING=${NETWORK_MAPPING:-"install=${MANAGEMENT_INTERFACE}"}
export PROXY=""
export IGNORE_PROXY=""
diff --git a/deploy/conf/cluster.conf b/deploy/conf/cluster.conf
index ef1f5701..72602ea0 100644
--- a/deploy/conf/cluster.conf
+++ b/deploy/conf/cluster.conf
@@ -1,20 +1,7 @@
-export VIRT_NUMBER=5
-export VIRT_CPUS=4
-export VIRT_MEM=16384
-export VIRT_DISK=30G
-export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
-#export 'ADAPTER_OS_PATTERN=(?i)centos-7\.1.*'
+export ADAPTER_OS_PATTERN='(?i)ubuntu-14\.04\.3.*'
+#export ADAPTER_OS_PATTERN='(?i)centos-7\.1.*'
export ADAPTER_NAME="openstack_juno"
export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes"
-export HOSTNAMES="host1,host2,host3,host4,host5"
-export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute"
export DEFAULT_ROLES=""
-export SWITCH_IPS="1.1.1.1"
-export SWITCH_CREDENTIAL="version=2c,community=public"
-export DEPLOYMENT_TIMEOUT="60"
-export POLL_SWITCHES_FLAG="nopoll_switches"
-export DASHBOARD_URL=""
-export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-source ${REGTEST_DIR}/base.conf
export VIP="10.1.0.222"
diff --git a/deploy/conf/five.conf b/deploy/conf/five.conf
index a40d68e9..32181b58 100644
--- a/deploy/conf/five.conf
+++ b/deploy/conf/five.conf
@@ -1,19 +1,6 @@
-export VIRT_NUMBER=5
-export VIRT_CPUS=4
-export VIRT_MEM=16384
-export VIRT_DISK=30G
-export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
-#export 'ADAPTER_OS_PATTERN=(?i)centos-7\.1.*'
+export ADAPTER_OS_PATTERN='(?i)ubuntu-14\.04\.3.*'
+#export ADAPTER_OS_PATTERN='(?i)centos-7\.1.*'
export ADAPTER_NAME="openstack_juno"
export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
export ADAPTER_FLAVOR_PATTERN="single-controller"
-export HOSTNAMES="host1,host2,host3,host4,host5"
-export HOST_ROLES="host1=controller,network;host2=compute,storage;host3=compute,storage;host4=compute,storage;host5=compute,storage"
export DEFAULT_ROLES=""
-export SWITCH_IPS="1.1.1.1"
-export SWITCH_CREDENTIAL="version=2c,community=public"
-export DEPLOYMENT_TIMEOUT="60"
-export POLL_SWITCHES_FLAG="nopoll_switches"
-export DASHBOARD_URL=""
-export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-source ${REGTEST_DIR}/base.conf
diff --git a/deploy/conf/network_cfg.yaml b/deploy/conf/network_cfg.yaml
new file mode 100644
index 00000000..73d05ac2
--- /dev/null
+++ b/deploy/conf/network_cfg.yaml
@@ -0,0 +1,53 @@
+nic_mappings: []
+bond_mappings: []
+
+provider_net_mappings:
+ - name: br-prv
+ network: physnet
+ interface: eth1
+ type: ovs
+
+sys_intf_mappings:
+ - name: mgmt
+ interface: eth1
+ vlan_tag: 2
+ - name: storage
+ interface: eth1
+ vlan_tag: 3
+ - name: external
+ interface: br-prv
+ vlan_tag: 4
+
+ip_settings:
+ - name: mgmt
+ ip_ranges:
+ - - "172.16.1.1"
+ - "172.16.1.254"
+ cidr: "172.16.1.0/24"
+
+ - name: storage
+ ip_ranges:
+ - - "172.16.2.1"
+ - "172.16.2.254"
+ cidr: "172.16.2.0/24"
+
+ - name: external
+ ip_ranges:
+ - - "172.16.3.2"
+ - "172.16.3.100"
+ cidr: "172.16.3.0/24"
+ gw: "172.16.3.1"
+
+public_net_info:
+ network: ext-net
+ type: vlan
+ segment_id: 1000
+ subnet: ext-subnet
+ provider_network: physnet
+ router: router-ext
+ enable_dhcp: False
+ no_gateway: False
+ external_gw: "172.16.3.1"
+ floating_ip_cidr: "172.16.3.0/24"
+ floating_ip_start: "172.16.3.100"
+ floating_ip_end: "172.16.3.254"
diff --git a/deploy/conf/neutron_cfg.yaml b/deploy/conf/neutron_cfg.yaml
new file mode 100644
index 00000000..ce6e1920
--- /dev/null
+++ b/deploy/conf/neutron_cfg.yaml
@@ -0,0 +1,6 @@
+openvswitch:
+ tenant_network_type: vxlan
+ network_vlan_ranges:
+ - 'physnet:1:4094'
+ bridge_mappings:
+ - 'physnet:br-prv'
diff --git a/deploy/conf/virtual.conf b/deploy/conf/virtual.conf
new file mode 100644
index 00000000..0dbdb28b
--- /dev/null
+++ b/deploy/conf/virtual.conf
@@ -0,0 +1,9 @@
+export VIRT_NUMBER=5
+export VIRT_CPUS=4
+export VIRT_MEM=16384
+export VIRT_DISK=30G
+
+export SWITCH_IPS="1.1.1.1"
+export SWITCH_CREDENTIAL="version=2c,community=public"
+export DEPLOYMENT_TIMEOUT="150"
+export POLL_SWITCHES_FLAG="nopoll_switches"
diff --git a/deploy/conf/virtual_cluster.yml b/deploy/conf/virtual_cluster.yml
new file mode 100644
index 00000000..b3bd9e77
--- /dev/null
+++ b/deploy/conf/virtual_cluster.yml
@@ -0,0 +1,26 @@
+TYPE: virtual
+FLAVOR: cluster
+
+hosts:
+ - name: host1
+ roles:
+ - controller
+ - ha
+
+ - name: host2
+ roles:
+ - controller
+ - ha
+
+ - name: host3
+ roles:
+ - controller
+ - ha
+
+ - name: host4
+ roles:
+ - compute
+
+ - name: host5
+ roles:
+ - compute
diff --git a/deploy/conf/virtual_five.yml b/deploy/conf/virtual_five.yml
new file mode 100644
index 00000000..f42f3d62
--- /dev/null
+++ b/deploy/conf/virtual_five.yml
@@ -0,0 +1,29 @@
+TYPE: virtual
+FLAVOR: five
+
+hosts:
+ - name: host1
+ roles:
+ - controller
+ - network
+
+ - name: host2
+ roles:
+ - compute
+ - storage
+
+ - name: host3
+ roles:
+ - compute
+ - storage
+
+ - name: host4
+ roles:
+ - compute
+ - storage
+
+ - name: host5
+ roles:
+ - compute
+ - storage
+
diff --git a/deploy/config_parse.py b/deploy/config_parse.py
new file mode 100644
index 00000000..f6c0dfa4
--- /dev/null
+++ b/deploy/config_parse.py
@@ -0,0 +1,86 @@
+import os
+import yaml
+import sys
+from Cheetah.Template import Template
+
+def init(file):
+ with open(file) as fd:
+ return yaml.load(fd)
+
+def decorator(func):
+ def wrapter(s, seq):
+ host_list = s.get('hosts', [])
+ result = []
+ for host in host_list:
+ s = func(s, seq, host)
+ if not s:
+ continue
+ result.append(s)
+ if len(result) == 0:
+ return ""
+ else:
+ return "\"" + seq.join(result) + "\""
+ return wrapter
+
+@decorator
+def hostnames(s, seq, host=None):
+ return host.get('name', '')
+
+@decorator
+def hostroles(s, seq, host=None):
+ return "%s=%s" % (host.get('name', ''), ','.join(host.get('roles', [])))
+
+@decorator
+def hostmacs(s, seq, host=None):
+ return host.get('mac', '')
+
+def export_config_file(s, ofile):
+ env = {}
+ env.update(s)
+ if env.get('hosts', []):
+ env.pop('hosts')
+
+ env.update({'TYPE': s.get('TYPE', "virtual")})
+ env.update({'FLAVOR': s.get('FLAVOR', "cluster")})
+ env.update({'HOSTNAMES': hostnames(s, ',')})
+ env.update({'HOST_ROLES': hostroles(s, ';')})
+
+ value = hostmacs(s, ',')
+ if len(value) > 0:
+ env.update({'HOST_MACS': value})
+
+ os.system("echo \#config file deployment parameter > %s" % ofile)
+ for k, v in env.items():
+ os.system("echo 'export %s=${%s:-%s}' >> %s" % (k, k, v, ofile))
+
+def export_reset_file(s, tmpl_dir, output_dir, output_file):
+ tmpl_file_name = s.get('POWER_TOOL', '')
+ if not tmpl_file_name:
+ return
+
+ tmpl = Template(file=os.path.join(tmpl_dir,'power', tmpl_file_name + '.tmpl'), searchList=s)
+
+ reset_file_name = os.path.join(output_dir, tmpl_file_name + '.sh')
+ with open(reset_file_name, 'w') as f:
+ f.write(tmpl.respond())
+
+ os.system("echo 'export POWER_MANAGE=%s' >> %s" % (reset_file_name, output_file))
+
+if __name__ == "__main__":
+ if len(sys.argv) != 5:
+ print("parameter wrong%d %s" % (len(sys.argv), sys.argv))
+ sys.exit(1)
+
+ _, config_file, tmpl_dir, output_dir, output_file = sys.argv
+ config_file += '.yml'
+ if not os.path.exists(config_file):
+ print("%s is not exist" % config_file)
+ sys.exit(1)
+
+ data = init(config_file)
+
+ export_config_file(data, os.path.join(output_dir, output_file))
+ export_reset_file(data, tmpl_dir, output_dir, os.path.join(output_dir, output_file))
+
+ sys.exit(0)
+
diff --git a/deploy/deploy_host.sh b/deploy/deploy_host.sh
index f95f2594..a4dbd00a 100644
--- a/deploy/deploy_host.sh
+++ b/deploy/deploy_host.sh
@@ -1,26 +1,14 @@
function deploy_host(){
- cd $WORK_DIR/installer/compass-core
- source $WORK_DIR/venv/bin/activate
- if pip --help | grep -q trusted; then
- pip install -i http://pypi.douban.com/simple -e . --trusted-host pypi.douban.com
- else
- pip install -i http://pypi.douban.com/simple -e .
- fi
+ NETWORK_FILE=${COMPASS_DIR}/deploy/conf/network_cfg.yaml
+ NEUTRON_FILE=${COMPASS_DIR}/deploy/conf/neutron_cfg.yaml
- sudo mkdir -p /var/log/compass
- sudo chown -R 777 /var/log/compass
-
- sudo mkdir -p /etc/compass
- sudo cp -rf conf/setting /etc/compass/.
-
- cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py
- sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py
+ pip install oslo.config
ssh $ssh_args root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks
scp $ssh_args -r ${COMPASS_DIR}/deploy/status_callback.py root@${COMPASS_SERVER}:/opt/compass/bin/ansible_callbacks/status_callback.py
reboot_hosts
- bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \
+ python ${COMPASS_DIR}/deploy/client.py --compass_server="${COMPASS_SERVER_URL}" \
--compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
--cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
--hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" \
@@ -37,5 +25,6 @@ function deploy_host(){
--host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
--machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \
--deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
- --cluster_vip="${VIP}"
+ --cluster_vip="${VIP}" --network_cfg="$NETWORK_FILE" --neutron_cfg="$NEUTRON_FILE"
+
}
diff --git a/deploy/deploy_parameter.sh b/deploy/deploy_parameter.sh
new file mode 100755
index 00000000..4cceb1ad
--- /dev/null
+++ b/deploy/deploy_parameter.sh
@@ -0,0 +1,91 @@
+set -x
+function get_option_name_list()
+{
+ echo $(echo "$1" | xargs -n 1 grep -oP "export .*?=" | \
+ awk '{print $2}' | sort | uniq | sed -e 's/=$//g')
+}
+function get_option_flag_list()
+{
+ echo $(echo "$1" | tr [:upper:] [:lower:] | \
+ xargs | sed -e 's/ /:,/g' -e 's/_/-/g')
+}
+
+function get_conf_name()
+{
+ cfg_file=`ls $COMPASS_DIR/deploy/conf/*.conf`
+ option_name=`get_option_name_list "$cfg_file"`
+ option_flag=`get_option_flag_list "$option_name"`
+
+ TEMP=`getopt -o h -l $option_flag -n 'deploy_parameter.sh' -- "$@"`
+
+ if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi
+ eval set -- "$TEMP"
+ while :; do
+ if [[ "$1" == "--" ]]; then
+ shift
+ break
+ fi
+ shift
+ done
+
+ if [[ $# -eq 0 ]]; then
+ echo "virtual_cluster"
+ elif [[ "$1" == "five" ]];then
+ echo "virtual_five"
+ else
+ echo $1
+ fi
+}
+
+function generate_input_env_file()
+{
+ ofile="$WORK_DIR/script/deploy_input.sh"
+
+ echo '#input deployment parameter' > $ofile
+
+ cfg_file=`ls $COMPASS_DIR/deploy/conf/{base,"$TYPE"_"$FLAVOR",$TYPE,$FLAVOR}.conf 2>/dev/null`
+ option_name=`get_option_name_list "$cfg_file"`
+ option_flag=`get_option_flag_list "$option_name"`
+
+ TEMP=`getopt -o h -l conf-name:,$option_flag -n 'deploy_parameter.sh' -- "$@"`
+
+ if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi
+ eval set -- "$TEMP"
+ while :; do
+ if [[ "$1" == "--" ]]; then
+ shift
+ break
+ fi
+
+ option=`echo ${1##-?} | tr [:lower:] [:upper:] | sed 's/-/_/g'`
+ echo "$option_name" | grep -w $option > /dev/null
+ if [[ $? -eq 0 ]]; then
+ echo "export $option=$2" >> $ofile
+ shift 2
+ continue
+ fi
+
+ echo "Internal error!"
+ exit 1
+ done
+
+ echo $ofile
+}
+
+function process_default_para()
+{
+ python ${COMPASS_DIR}/deploy/config_parse.py \
+ "${COMPASS_DIR}/deploy/conf/`get_conf_name $*`" \
+ "${COMPASS_DIR}/deploy/template" \
+ "${WORK_DIR}/script" \
+ "deploy_config.sh"
+
+ echo ${WORK_DIR}/script/deploy_config.sh
+}
+
+function process_input_para()
+{
+ input_file=`generate_input_env_file $config_file $*`
+
+ echo $input_file
+}
diff --git a/deploy/host_baremetal.sh b/deploy/host_baremetal.sh
index a543528f..9e25c98d 100644
--- a/deploy/host_baremetal.sh
+++ b/deploy/host_baremetal.sh
@@ -1,14 +1,15 @@
function reboot_hosts() {
- cmd='for i in {14..18}; do /dev/shm/smm/usr/bin/smmset -l blade$i -d bootoption -v 1 0; echo Y | /dev/shm/smm/usr/bin/smmset -l blade$i -d frucontrol -v 0; done'
- /usr/bin/expect ${COMPASS_DIR}/deploy/remote_excute.exp ${SWITCH_IPS} 'root' 'Admin@7*24' "$cmd"
+ if [ -z $POWER_MANAGE ]; then
+ return
+ fi
+ $POWER_MANAGE
}
function get_host_macs() {
local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
- local machines=`echo $HOST_MACS|sed 's/ /,/g'`
-
echo "test: true" >> $config_file
- echo "pxe_boot_macs: [${machines}]" >> $config_file
+ machine=`echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'`
+ echo "pxe_boot_macs: [$machine]" >> $config_file
- echo $machines
+ echo $machine
}
diff --git a/deploy/host_virtual.sh b/deploy/host_virtual.sh
new file mode 100644
index 00000000..0754b1f4
--- /dev/null
+++ b/deploy/host_virtual.sh
@@ -0,0 +1,61 @@
+host_vm_dir=$WORK_DIR/vm
+function tear_down_machines() {
+ for i in $HOSTNAMES; do
+ sudo virsh destroy $i
+ sudo virsh undefine $i
+ rm -rf $host_vm_dir/$i
+ done
+}
+
+function reboot_hosts() {
+ log_warn "reboot_hosts do nothing"
+}
+
+function launch_host_vms() {
+ old_ifs=$IFS
+ IFS=,
+ tear_down_machines
+ #function_bod
+ mac_array=($machines)
+ log_info "bringing up pxe boot vms"
+ i=0
+ for host in $HOSTNAMES; do
+ log_info "creating vm disk for instance $host"
+ vm_dir=$host_vm_dir/$host
+ mkdir -p $vm_dir
+ sudo qemu-img create -f raw $vm_dir/disk.img ${VIRT_DISK}
+ # create vm xml
+ sed -e "s/REPLACE_MEM/$VIRT_MEM/g" \
+ -e "s/REPLACE_CPU/$VIRT_CPUS/g" \
+ -e "s/REPLACE_NAME/$host/g" \
+ -e "s#REPLACE_IMAGE#$vm_dir/disk.img#g" \
+ -e "s/REPLACE_BOOT_MAC/${mac_array[i]}/g" \
+ -e "s/REPLACE_BRIDGE_MGMT/br_install/g" \
+ -e "s/REPLACE_BRIDGE_TENANT/br_install/g" \
+ -e "s/REPLACE_BRIDGE_PUBLIC/br_install/g" \
+ -e "s/REPLACE_BRIDGE_STORAGE/br_install/g" \
+ $COMPASS_DIR/deploy/template/vm/host.xml\
+ > $vm_dir/libvirt.xml
+
+ sudo virsh define $vm_dir/libvirt.xml
+ sudo virsh start $host
+ let i=i+1
+ done
+ IFS=$old_ifs
+}
+
+function get_host_macs() {
+ local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
+ local mac_generator=${COMPASS_DIR}/deploy/mac_generator.sh
+ local machines=
+
+ chmod +x $mac_generator
+ mac_array=`$mac_generator $VIRT_NUMBER`
+ machines=`echo $mac_array|sed 's/ /,/g'`
+
+ echo "test: true" >> $config_file
+ echo "pxe_boot_macs: [${machines}]" >> $config_file
+
+ echo $machines
+}
+
diff --git a/deploy/launch.sh b/deploy/launch.sh
new file mode 100755
index 00000000..c040900c
--- /dev/null
+++ b/deploy/launch.sh
@@ -0,0 +1,65 @@
+#set -x
+WORK_DIR=$COMPASS_DIR/work/deploy
+
+mkdir -p $WORK_DIR/script
+
+source ${COMPASS_DIR}/util/log.sh
+source ${COMPASS_DIR}/deploy/deploy_parameter.sh
+source $(process_default_para $*) || exit 1
+source $(process_input_para $*) || exit 1
+source ${COMPASS_DIR}/deploy/conf/${FLAVOR}.conf
+source ${COMPASS_DIR}/deploy/conf/${TYPE}.conf
+source ${COMPASS_DIR}/deploy/conf/base.conf
+source ${COMPASS_DIR}/deploy/prepare.sh
+source ${COMPASS_DIR}/deploy/network.sh
+source ${COMPASS_DIR}/deploy/host_${TYPE}.sh
+source ${COMPASS_DIR}/deploy/compass_vm.sh
+source ${COMPASS_DIR}/deploy/deploy_host.sh
+
+######################### main process
+if true
+then
+if ! prepare_env;then
+ echo "prepare_env failed"
+ exit 1
+fi
+
+log_info "########## get host mac begin #############"
+machines=`get_host_macs`
+if [[ -z $machines ]];then
+ log_error "get_host_macs failed"
+ exit 1
+fi
+
+log_info "deploy host macs: $machines"
+export machines
+
+log_info "########## set up network begin #############"
+if ! create_nets;then
+ log_error "create_nets failed"
+ exit 1
+fi
+
+if ! launch_compass;then
+ log_error "launch_compass failed"
+ exit 1
+fi
+else
+# test code
+export machines="'00:00:16:42:da:3b','00:00:5f:73:b0:82','00:00:f1:5b:8e:81','00:00:f8:67:07:e6','00:00:53:fe:3e:98'"
+fi
+if [[ ! -z $VIRT_NUMBER ]];then
+ if ! launch_host_vms;then
+ log_error "launch_host_vms failed"
+ exit 1
+ fi
+fi
+if ! deploy_host;then
+ #tear_down_machines
+ #tear_down_compass
+ exit 1
+else
+ #tear_down_machines
+ #tear_down_compass
+ exit 0
+fi
diff --git a/deploy/log.py b/deploy/log.py
new file mode 100644
index 00000000..1c20f37e
--- /dev/null
+++ b/deploy/log.py
@@ -0,0 +1,36 @@
+import logging
+import os
+loggers = {}
+def getLogger(name):
+ if name in loggers:
+ return loggers[name]
+
+ logger = logging.getLogger(name)
+ logger.setLevel(logging.DEBUG)
+
+ # create file handler which logs even debug messages
+ log_file = "%s.log" % name
+ try:
+ os.remove(log_file)
+ except:
+ pass
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ # create console handler with a higher log level
+ ch = logging.StreamHandler()
+ ch.setLevel(logging.ERROR)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+ ch.setFormatter(formatter)
+ fh.setFormatter(formatter)
+
+ # add the handlers to logger
+ logger.addHandler(ch)
+ logger.addHandler(fh)
+
+ loggers[name] = logger
+ return logger
+
diff --git a/deploy/network.sh b/deploy/network.sh
index 851a685a..864ec011 100755
--- a/deploy/network.sh
+++ b/deploy/network.sh
@@ -1,32 +1,62 @@
-function destroy_nets() {
- sudo virsh net-destroy mgmt > /dev/null 2>&1
- sudo virsh net-undefine mgmt > /dev/null 2>&1
+function destroy_nat() {
+ sudo virsh net-destroy $1 2>&1
+ sudo virsh net-undefine $1 2>&1
+ rm -rf $COMPASS_DIR/deploy/work/network/$1.xml
+}
+
+function destroy_bridge()
+{
+ bridge=$1
+ nic=$2
+ ips=$(ip addr show $bridge | grep 'inet ' | awk -F' ' '{print $2}')
+ routes=$(ip route show | grep $bridge)
+
+ ip link set $bridge down
- sudo virsh net-destroy install > /dev/null 2>&1
- sudo virsh net-undefine install > /dev/null 2>&1
- rm -rf $COMPASS_DIR/deploy/work/network/*.xml
+ brctl delbr $bridge
+
+ for ip in $ips; do
+ ip addr add $ip dev $nic
+ done
+
+ echo "$routes" | while read line; do
+ echo $line | sed "s/$bridge/$nic/g" | xargs ip route add | true
+ done
}
-function setup_om_bridge() {
- local device=$1
- local gw=$2
- ip link set br_install down
- ip addr flush $device
- brctl delbr br_install
-
- brctl addbr br_install
- brctl addif br_install $device
- ip link set br_install up
-
- shift;shift
- for ip in $*;do
- ip addr add $ip dev br_install
+function create_bridge()
+{
+ bridge=$1
+ nic=$2
+ ips=$(ip addr show $nic | grep 'inet ' | awk -F' ' '{print $2}')
+ routes=$(ip route show | grep $nic)
+
+ ip addr flush $nic
+
+ brctl addbr $bridge
+ brctl addif $bridge $nic
+ ip link set $bridge up
+
+ for ip in $ips; do
+ ip addr add $ip dev $bridge
done
- route add default gw $gw
+ mask=`echo $INSTALL_MASK | awk -F'.' '{print ($1*(2^24)+$2*(2^16)+$3*(2^8)+$4)}'`
+ mask_len=`echo "obase=2;${mask}"|bc|awk -F'0' '{print length($1)}'`
+ ip addr add $INSTALL_GW/$mask_len dev $bridge
+
+ echo "$routes" | while read line; do
+ echo $line | sed "s/$nic/$bridge/g" | xargs ip route add | true
+ done
+}
+
+function setup_om_bridge() {
+ destroy_bridge br_install $OM_NIC
+ create_bridge br_install $OM_NIC
}
function setup_om_nat() {
+ destroy_nat install
# create install network
sed -e "s/REPLACE_BRIDGE/br_install/g" \
-e "s/REPLACE_NAME/install/g" \
@@ -42,8 +72,7 @@ function setup_om_nat() {
}
function create_nets() {
- destroy_nets
-
+ destroy_nat mgmt
# create mgmt network
sed -e "s/REPLACE_BRIDGE/br_mgmt/g" \
-e "s/REPLACE_NAME/mgmt/g" \
@@ -61,10 +90,7 @@ function create_nets() {
if [[ ! -z $VIRT_NUMBER ]];then
setup_om_nat
else
- mask=`echo $INSTALL_MASK | awk -F'.' '{print ($1*(2^24)+$2*(2^16)+$3*(2^8)+$4)}'`
- mask_len=`echo "obase=2;${mask}"|bc|awk -F'0' '{print length($1)}'`
- setup_om_bridge $OM_NIC $OM_GW $INSTALL_GW/$mask_len $OM_IP
+ setup_om_bridge
fi
-
}
diff --git a/deploy/prepare.sh b/deploy/prepare.sh
index 1b0ded8c..704b540a 100755
--- a/deploy/prepare.sh
+++ b/deploy/prepare.sh
@@ -1,26 +1,46 @@
+
+function download_iso()
+{
+ iso_name=`basename $ISO_URL`
+ rm -f $WORK_DIR/cache/"$iso_name.md5"
+ curl --connect-timeout 10 -o $WORK_DIR/cache/"$iso_name.md5" $ISO_URL.md5
+ if [[ -f $WORK_DIR/cache/$iso_name ]]; then
+ local_md5=`md5sum $WORK_DIR/cache/$iso_name | cut -d ' ' -f 1`
+ repo_md5=`cat $WORK_DIR/cache/$iso_name.md5 | cut -d ' ' -f 1`
+ if [[ "$local_md5" == "$repo_md5" ]]; then
+ return
+ fi
+ fi
+
+ curl --connect-timeout 10 -o $WORK_DIR/cache/$iso_name $ISO_URL
+}
+
+
function prepare_env() {
export PYTHONPATH=/usr/lib/python2.7/dist-packages:/usr/local/lib/python2.7/dist-packages
sudo apt-get update -y
- sudo apt-get install mkisofs bc
+ sudo apt-get install mkisofs bc curl
sudo apt-get install git python-pip python-dev -y
sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev -y
sudo pip install --upgrade pip
sudo pip install --upgrade ansible
sudo pip install --upgrade virtualenv
+ sudo pip install --upgrade netaddr
+ sudo pip install --upgrade oslo.config
sudo service libvirt-bin restart
# prepare work dir
- sudo rm -rf $WORK_DIR
- mkdir -p $WORK_DIR
+ rm -rf $WORK_DIR/{installer,vm,network,iso,venv}
mkdir -p $WORK_DIR/installer
mkdir -p $WORK_DIR/vm
mkdir -p $WORK_DIR/network
mkdir -p $WORK_DIR/iso
mkdir -p $WORK_DIR/venv
+ mkdir -p $WORK_DIR/cache
- if [[ ! -f centos.iso ]];then
- wget -O $WORK_DIR/iso/centos.iso $ISO_URL
- fi
+ download_iso
+
+ cp $WORK_DIR/cache/`basename $ISO_URL` $WORK_DIR/iso/centos.iso -f
# copy compass
mkdir -p $WORK_DIR/mnt
diff --git a/deploy/restful.py b/deploy/restful.py
new file mode 100644
index 00000000..4d86da82
--- /dev/null
+++ b/deploy/restful.py
@@ -0,0 +1,1110 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass api client library.
+"""
+
+import json
+import logging
+import requests
+
+class Client(object):
+ """compass restful api wrapper"""
+
+ def __init__(self, url, headers=None, proxies=None, stream=None):
+ logging.info('create api client %s', url)
+ self.url_ = url
+ self.session_ = requests.Session()
+
+ if headers:
+ self.session_.headers.update(headers)
+ self.session_.headers.update({
+ 'Accept': 'application/json'
+ })
+
+ if proxies is not None:
+ self.session_.proxies = proxies
+
+ if stream is not None:
+ self.session_.stream = stream
+
+ def __del__(self):
+ self.session_.close()
+
+ @classmethod
+ def _get_response(cls, resp):
+ response_object = {}
+ try:
+ response_object = resp.json()
+ except Exception as error:
+ logging.error('failed to load object from %s: %s',
+ resp.url, resp.content)
+ logging.exception(error)
+ response_object['status'] = 'Json Parsing Failed'
+ response_object['message'] = resp.content
+
+ return resp.status_code, response_object
+
+ def _get(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('get %s with data %s', url, data)
+ if data:
+ resp = self.session_.get(url, params=data)
+ else:
+ resp = self.session_.get(url)
+
+ return self._get_response(resp)
+
+ def _post(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('post %s with data %s', url, data)
+ if data:
+ resp = self.session_.post(url, json.dumps(data))
+ else:
+ resp = self.session_.post(url)
+
+ return self._get_response(resp)
+
+ def _put(self, req_url, data=None):
+ """encapsulate put method."""
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('put %s with data %s', url, data)
+ if data:
+ resp = self.session_.put(url, json.dumps(data))
+ else:
+ resp = self.session_.put(url)
+
+ return self._get_response(resp)
+
+ def _patch(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('patch %s with data %s', url, data)
+ if data:
+ resp = self.session_.patch(url, json.dumps(data))
+ else:
+ resp = self.session_.patch(url)
+
+ return self._get_response(resp)
+
+ def _delete(self, req_url):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('delete %s', url)
+ return self._get_response(self.session_.delete(url))
+
+ def login(self, email, password):
+ credential = {}
+ credential['email'] = email
+ credential['password'] = password
+ return self._post('/users/login', data=credential)
+
+ def get_token(self, email, password):
+ credential = {}
+ credential['email'] = email
+ credential['password'] = password
+ status, resp = self._post('/users/token', data=credential)
+ if status < 400:
+ self.session_.headers.update({'X-Auth-Token': resp['token']})
+ return status, resp
+
+ def get_users(self):
+ users = self._get('/users')
+ return users
+
+ def list_switches(
+ self,
+ switch_ips=None,
+ switch_ip_networks=None):
+ """list switches."""
+ params = {}
+ if switch_ips:
+ params['switchIp'] = switch_ips
+
+ if switch_ip_networks:
+ params['switchIpNetwork'] = switch_ip_networks
+
+ switchlist = self._get('/switches', data=params)
+ return switchlist
+
+ def get_switch(self, switch_id):
+ return self._get('/switches/%s' % switch_id)
+
+ def add_switch(
+ self,
+ switch_ip,
+ version=None,
+ community=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['ip'] = switch_ip
+ data['credentials'] = {}
+ if version:
+ data['credentials']['version'] = version
+
+ if community:
+ data['credentials']['community'] = community
+
+ return self._post('/switches', data=data)
+
+ def update_switch(self, switch_id, state='initialized',
+ version='2c', community='public', raw_data={}):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ data['credentials'] = {}
+ if version:
+ data['credentials']['version'] = version
+
+ if community:
+ data['credentials']['community'] = community
+
+ if state:
+ data['state'] = state
+
+ return self._put('/switches/%s' % switch_id, data=data)
+
+ def delete_switch(self, switch_id):
+ return self._delete('/switches/%s' % switch_id)
+
+ def list_switch_machines(self, switch_id, port=None, vlans=None,
+ tag=None, location=None):
+ data = {}
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/switches/%s/machines' % switch_id, data=data)
+
+ def get_switch_machine(self, switch_id, machine_id):
+ return self._get('/switches/%s/machines/%s' % (switch_id, machine_id))
+
+ def list_switch_machines_hosts(self, switch_id, port=None, vlans=None,
+ mac=None, tag=None, location=None,
+ os_name=None, os_id=None):
+
+ data = {}
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/switches/%s/machines-hosts' % switch_id, data=data)
+
+ def add_switch_machine(self, switch_id, mac=None, port=None,
+ vlans=None, ipmi_credentials=None,
+ tag=None, location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if mac:
+ data['mac'] = mac
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._post('/switches/%s/machines' % switch_id, data=data)
+
+ def update_switch_machine(self, switch_id, machine_id, port=None,
+ vlans=None, ipmi_credentials=None, tag=None,
+ location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._put('/switches/%s/machines/%s' %
+ (switch_id, machine_id), data=data)
+
+ def delete_switch_machine(self, switch_id, machine_id):
+ return self._delete('/switches/%s/machines/%s' %
+ (switch_id, machine_id))
+
+ # test these
+ def poll_switch(self, switch_id):
+ data = {}
+ data['find_machines'] = None
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def add_group_switch_machines(self, switch_id, group_machine_ids):
+ data = {}
+ data['add_machines'] = group_machine_ids
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def remove_group_switch_machines(self, switch_id, group_machine_ids):
+ data = {}
+ data['remove_machines'] = group_machine_ids
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def update_group_switch_machines(self, switch_id, group_machines):
+ data = {}
+ data['set_machines'] = group_machines
+ return self._post('/switches/%s/action' % switch_id, data=data)
+ # end
+
+ def list_switchmachines(self, switch_ip_int=None, port=None, vlans=None,
+ mac=None, tag=None, location=None):
+ data = {}
+ if switch_ip_int:
+ data['switch_ip_int'] = switch_ip_int
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/switch-machines', data=data)
+
+ def list_switchmachines_hosts(self, switch_ip_int=None, port=None,
+ vlans=None, mac=None, tag=None,
+ location=None, os_name=None, os_id=None):
+
+ data = {}
+ if switch_ip_int:
+ data['switch_ip_int'] = switch_ip_int
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/switches-machines-hosts', data=data)
+
+ def show_switchmachine(self, switchmachine_id):
+ return self._get('/switch-machines/%s' % switchmachine_id)
+
+ def update_switchmachine(self, switchmachine_id,
+ port=None, vlans=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ return self._put('/switch-machines/%s' % switchmachine_id, data=data)
+
+ def patch_switchmachine(self, switchmachine_id,
+ vlans=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ elif vlans:
+ data['vlans'] = vlans
+
+ return self._patch('/switch-machines/%s' % switchmachine_id, data=data)
+
+ def delete_switchmachine(self, switchmachine_id):
+ return self._delete('/switch-machines/%s' % switchmachine_id)
+
+ def list_machines(self, mac=None, tag=None, location=None):
+ data = {}
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/machines', data=data)
+
+ def get_machine(self, machine_id):
+ data = {}
+ if id:
+ data['id'] = id
+
+ return self._get('/machines/%s' % machine_id, data=data)
+
+ def update_machine(self, machine_id, ipmi_credentials=None, tag=None,
+ location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._put('/machines/%s' % machine_id, data=data)
+
+ def patch_machine(self, machine_id, ipmi_credentials=None,
+ tag=None, location=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._patch('/machines/%s' % machine_id, data=data)
+
+ def delete_machine(self, machine_id):
+ return self._delete('machines/%s' % machine_id)
+
+ def list_subnets(self, subnet=None, name=None):
+ data = {}
+ if subnet:
+ data['subnet'] = subnet
+
+ if name:
+ data['name'] = name
+
+ return self._get('/subnets', data=data)
+
+ def get_subnet(self, subnet_id):
+ return self._get('/subnets/%s' % subnet_id)
+
+ def add_subnet(self, subnet, name=None, raw_data=None):
+ data = {}
+ data['subnet'] = subnet
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if name:
+ data['name'] = name
+
+ return self._post('/subnets', data=data)
+
+ def update_subnet(self, subnet_id, subnet=None,
+ name=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if subnet:
+ data['subnet'] = subnet
+
+ if name:
+ data['name'] = name
+
+ return self._put('/subnets/%s' % subnet_id, data=data)
+
+ def delete_subnet(self, subnet_id):
+ return self._delete('/subnets/%s' % subnet_id)
+
+ def list_adapters(self, name=None, distributed_system_name=None,
+ os_installer_name=None, package_installer_name=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ if distributed_system_name:
+ data['distributed_system_name'] = distributed_system_name
+
+ if os_installer_name:
+ data['os_installer_name'] = os_installer_name
+
+ if package_installer_name:
+ data['package_installer_name'] = package_installer_name
+
+ return self._get('/adapters', data=data)
+
+ def get_adapter(self, adapter_id):
+ return self._get('/adapters/%s' % adapter_id)
+
+ def get_adapter_roles(self, adapter_id):
+ return self._get('/adapters/%s/roles' % adapter_id)
+
+ def get_adapter_metadata(self, adapter_id):
+ return self._get('/adapters/%s/metadata' % adapter_id)
+
+ def get_os_metadata(self, os_id):
+ return self._get('/oses/%s/metadata' % os_id)
+
+ def list_clusters(self, name=None, os_name=None,
+ distributed_system_name=None, owner=None,
+ adapter_id=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if distributed_system_name:
+ data['distributed_system_name'] = distributed_system_name
+
+ if owner:
+ data['owner'] = owner
+
+ if adapter_id:
+ data['adapter_id'] = adapter_id
+
+ return self._get('/clusters', data=data)
+
+ def get_cluster(self, cluster_id):
+ return self._get('/clusters/%s' % cluster_id)
+
+ def add_cluster(self, name, adapter_id, os_id,
+ flavor_id=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if flavor_id:
+ data['flavor_id'] = flavor_id
+ data['name'] = name
+ data['adapter_id'] = adapter_id
+ data['os_id'] = os_id
+
+ return self._post('/clusters', data=data)
+
+ def update_cluster(self, cluster_id, name=None,
+ reinstall_distributed_system=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_distributed_system:
+ data['reinstall_distributed_system'] = (
+ reinstall_distributed_system
+ )
+ return self._put('/clusters/%s' % cluster_id, data=data)
+
+ def delete_cluster(self, cluster_id):
+ return self._delete('/clusters/%s' % cluster_id)
+
+ def get_cluster_config(self, cluster_id):
+ return self._get('/clusters/%s/config' % cluster_id)
+
+ def get_cluster_metadata(self, cluster_id):
+ return self._get('/clusters/%s/metadata' % cluster_id)
+
+ def update_cluster_config(self, cluster_id, os_config=None,
+ package_config=None, config_step=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ if config_step:
+ data['config_step'] = config_step
+
+ return self._put('/clusters/%s/config' % cluster_id, data=data)
+
+ def patch_cluster_config(self, cluster_id, os_config=None,
+ package_config=None, config_step=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ if config_step:
+ data['config_step'] = config_step
+
+ return self._patch('/clusters/%s/config' % cluster_id, data=data)
+
+ def delete_cluster_config(self, cluster_id):
+ return self._delete('/clusters/%s/config' % cluster_id)
+
+ # test these
+ def add_hosts_to_cluster(self, cluster_id, hosts):
+ data = {}
+ data['add_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def set_hosts_in_cluster(self, cluster_id, hosts):
+ data = {}
+ data['set_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def remove_hosts_from_cluster(self, cluster_id, hosts):
+ data = {}
+ data['remove_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def review_cluster(self, cluster_id, review={}):
+ data = {}
+ data['review'] = review
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def deploy_cluster(self, cluster_id, deploy={}):
+ data = {}
+ data['deploy'] = deploy
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def get_cluster_state(self, cluster_id):
+ return self._get('/clusters/%s/state' % cluster_id)
+
+ def list_cluster_hosts(self, cluster_id):
+ return self._get('/clusters/%s/hosts' % cluster_id)
+
+ def list_clusterhosts(self):
+ return self._get('/clusterhosts')
+
+ def get_cluster_host(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s' % (cluster_id, host_id))
+
+ def get_clusterhost(self, clusterhost_id):
+ return self._get('/clusterhosts/%s' % clusterhost_id)
+
+ def add_cluster_host(self, cluster_id, machine_id=None, name=None,
+ reinstall_os=None, raw_data=None):
+ data = {}
+ data['machine_id'] = machine_id
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_os:
+ data['reinstall_os'] = reinstall_os
+
+ return self._post('/clusters/%s/hosts' % cluster_id, data=data)
+
+ def delete_cluster_host(self, cluster_id, host_id):
+ return self._delete('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id))
+
+ def delete_clusterhost(self, clusterhost_id):
+ return self._delete('/clusterhosts/%s' % clusterhost_id)
+
+ def get_cluster_host_config(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id))
+
+ def get_clusterhost_config(self, clusterhost_id):
+ return self._get('/clusterhosts/%s/config' % clusterhost_id)
+
+ def update_cluster_host_config(self, cluster_id, host_id,
+ os_config=None,
+ package_config=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._put('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id), data=data)
+
+ def update_clusterhost_config(self, clusterhost_id, os_config=None,
+ package_config=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._put('/clusterhosts/%s/config' % clusterhost_id,
+ data=data)
+
+ def patch_cluster_host_config(self, cluster_id, host_id,
+ os_config=None,
+ package_config=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._patch('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id), data=data)
+
+ def patch_clusterhost_config(self, clusterhost_id, os_config=None,
+ package_config=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._patch('/clusterhosts/%s' % clusterhost_id, data=data)
+
+ def delete_cluster_host_config(self, cluster_id, host_id):
+ return self._delete('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id))
+
+ def delete_clusterhost_config(self, clusterhost_id):
+ return self._delete('/clusterhosts/%s/config' % clusterhost_id)
+
+ def get_cluster_host_state(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s/state' %
+ (cluster_id, host_id))
+
+ def update_cluster_host(self, cluster_id, host_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id), data=data)
+
+ def update_clusterhost(self, clusterhost_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusterhosts/%s' % clusterhost_id, data=data)
+
+ def patch_cluster_host(self, cluster_id, host_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._patch('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id), data=data)
+
+ def patch_clusterhost(self, clusterhost_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._patch('/clusterhosts/%s' % clusterhost_id, data=data)
+
+ def get_clusterhost_state(self, clusterhost_id):
+ return self._get('/clusterhosts/%s/state' % clusterhost_id)
+
+ def update_cluster_host_state(self, cluster_id, host_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+ return self._put('/clusters/%s/hosts/%s/state' % (cluster_id, host_id),
+ data=data)
+
+ def update_clusterhost_state(self, clusterhost_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+ return self._put('/clusterhosts/%s/state' % clusterhost_id, data=data)
+
+ def list_hosts(self, name=None, os_name=None, owner=None, mac=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if owner:
+ data['owner'] = owner
+
+ if mac:
+ data['mac'] = mac
+
+ return self._get('/hosts', data=data)
+
+ def get_host(self, host_id):
+ return self._get('/hosts/%s' % host_id)
+
+ def list_machines_or_hosts(self, mac=None, tag=None,
+ location=None, os_name=None,
+ os_id=None):
+ data = {}
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/machines-hosts', data=data)
+
+ def get_machine_or_host(self, host_id):
+ return self._get('/machines-hosts/%s' % host_id)
+
+ def update_host(self, host_id, name=None,
+ reinstall_os=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_os:
+ data['reinstall_os'] = reinstall_os
+
+ return self._put('/hosts/%s' % host_id, data=data)
+
+ def delete_host(self, host_id):
+ return self._delete('/hosts/%s' % host_id)
+
+ def get_host_clusters(self, host_id):
+ return self._get('/hosts/%s/clusters' % host_id)
+
+ def get_host_config(self, host_id):
+ return self._get('/hosts/%s/config' % host_id)
+
+ def update_host_config(self, host_id, os_config, raw_data=None):
+ data = {}
+ data['os_config'] = os_config
+ if raw_data:
+ data.update(raw_data)
+
+ return self._put('/hosts/%s/config' % host_id, data=data)
+
+ def patch_host_config(self, host_id, os_config, raw_data=None):
+ data = {}
+ data['os_config'] = os_config
+ if raw_data:
+ data.update(raw_data)
+
+ return self._patch('/hosts/%s/config' % host_id, data=data)
+
+ def delete_host_config(self, host_id):
+ return self._delete('/hosts/%s/config' % host_id)
+
+ def list_host_networks(self, host_id, interface=None, ip=None,
+ subnet=None, is_mgmt=None, is_promiscuous=None):
+ data = {}
+ if interface:
+ data['interface'] = interface
+
+ if ip:
+ data['ip'] = ip
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._get('/hosts/%s/networks' % host_id, data=data)
+
+ def list_all_host_networks(self, interface=None, ip=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None):
+ data = {}
+ if interface:
+ data['interface'] = interface
+
+ if ip:
+ data['ip'] = ip
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._get('/host-networks', data=data)
+
+ def get_host_network(self, host_id, host_network_id):
+ return self._get('/hosts/%s/networks/%s' %
+ (host_id, host_network_id))
+
+ def get_network_for_all_hosts(self, host_network_id):
+ return self._get('/host-networks/%s' % host_network_id)
+
+ def add_host_network(self, host_id, interface, ip, subnet_id,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ data['interface'] = interface
+ data['ip'] = ip
+ data['subnet_id'] = subnet_id
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._post('/hosts/%s/networks' % host_id, data=data)
+
+ def update_host_network(self, host_id, host_network_id,
+ ip=None, subnet_id=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ip:
+ data['ip'] = ip
+
+ if subnet_id:
+ data['subnet_id'] = subnet_id
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._put('/hosts/%s/networks/%s' %
+ (host_id, host_network_id), data=data)
+
+ def update_hostnetwork(self, host_network_id, ip=None,
+ subnet_id=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ip:
+ data['ip'] = ip
+
+ if subnet_id:
+ data['subnet_id'] = subnet_id
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._put('/host-networks/%s' % host_network_id,
+ data=data)
+
+ def delete_host_network(self, host_id, host_network_id):
+ return self._delete('/hosts/%s/networks/%s',
+ (host_id, host_network_id))
+
+ def delete_hostnetwork(self, host_network_id):
+ return self._delete('/host-networks/%s' % host_network_id)
+
+ def get_host_state(self, host_id):
+ return self._get('/hosts/%s/state' % host_id)
+
+ def update_host_state(self, host_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+ return self._put('/hosts/%s/state' % host_id, date=data)
+
+ def poweron_host(self, host_id):
+ data = {}
+ data['poweron'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def poweroff_host(self, host_id):
+ data = {}
+ data['poweroff'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def reset_host(self, host_id):
+ data = {}
+ data['reset'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def clusterhost_ready(self, clusterhost_name):
+ data = {}
+ data['ready'] = True
+
+ return self._post('/clusterhosts/%s/state_internal' %
+ clusterhost_name, data=data)
diff --git a/deploy/template/power/ipmitool.tmpl b/deploy/template/power/ipmitool.tmpl
new file mode 100644
index 00000000..a3157132
--- /dev/null
+++ b/deploy/template/power/ipmitool.tmpl
@@ -0,0 +1,25 @@
## Cheetah template producing a shell script that PXE-boots and power-cycles
## every host via ipmitool. Pass 1 validates that each host has an IPMI IP,
## user and password (falling back to the template-level defaults); if any
## host is incomplete the emitted script prints the failing host names and
## exits non-zero, and no ipmitool command is generated.
#set default_ipmiUser=$getVar('ipmiUser', '')
#set default_ipmiPass=$getVar('ipmiPass', '')
#set hosts=$getVar('hosts', [])
#set fail_list=""
#for host in $hosts
#set host_name=$host.get('name', '')
#set ipmiIp=$host.get('ipmiIp', '')
#set ipmiUser=$host.get('ipmiUser', $default_ipmiUser)
#set ipmiPass=$host.get('ipmiPass', $default_ipmiPass)
#if not ($ipmiIp and $ipmiUser and $ipmiPass)
#set fail_list=$fail_list + $host_name + " "
#end if
#end for
#if fail_list
echo $fail_list.strip() ipmi config is not right
exit 1
#else
## Pass 2: all hosts validated — emit one bootdev-pxe + power-reset pair per host.
#for host in $hosts
#set ipmiIp=$host.get('ipmiIp', '')
#set ipmiUser=$host.get('ipmiUser', $default_ipmiUser)
#set ipmiPass=$host.get('ipmiPass', $default_ipmiPass)
ipmitool -I lan -H $ipmiIp -U $ipmiUser -P $ipmiPass chassis bootdev pxe
ipmitool -I lan -H $ipmiIp -U $ipmiUser -P $ipmiPass chassis power reset
#end for
#end if
diff --git a/deploy/template/power/smmset.tmpl b/deploy/template/power/smmset.tmpl
new file mode 100644
index 00000000..e673a682
--- /dev/null
+++ b/deploy/template/power/smmset.tmpl
@@ -0,0 +1,22 @@
## Cheetah template producing a shell script that sets PXE boot and triggers
## FRU control for blades through the chassis SMM. Each host must carry a
## 'location' (blade number); hosts without one are collected into fail_list
## and the emitted script aborts with their names instead of acting.
#set hosts=$getVar('hosts', [])
#set fail_list=""
#set location_list=""
#for host in $hosts
#set host_name=$host.get('name', '')
#set location=$host.get('location', '')
#if not $location
#set fail_list=$fail_list + $host_name + " "
#else
#set location_list=$location_list + str($location)+ " "
#end if
#end for
#if fail_list
echo $fail_list.strip() config is wrong
exit 1
#else
## Build one remote command looping over all blade locations, then run it on
## every switch IP via the expect helper. The '\$' escapes keep these shell
## variables literal in the generated script rather than expanded by Cheetah.
## NOTE(review): credentials are hard-coded in the generated command line —
## consider sourcing them from configuration.
cmd='for i in $location_list.strip(); do \
    /dev/shm/smm/usr/bin/smmset -l blade\$i -d bootoption -v 1 0; \
    echo Y | /dev/shm/smm/usr/bin/smmset -l blade\$i -d frucontrol -v 0; done'

/usr/bin/expect \${COMPASS_DIR}/deploy/remote_excute.exp \${SWITCH_IPS} 'root' 'Admin@7*24' "\$cmd"
#end if