Diffstat (limited to 'fuel/deploy/cloud_deploy')
-rw-r--r--  fuel/deploy/cloud_deploy/__init__.py                                      1
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/__init__.py                                1
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/common.py                                 51
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/configure_environment.py                  74
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/configure_network.py                      62
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/configure_nodes.py                       108
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/configure_settings.py                     47
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/dea.py                                    86
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/deploy.py                                208
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/deployment.py                            100
-rw-r--r--  fuel/deploy/cloud_deploy/cloud_deploy.py                                117
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/__init__.py                    1
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/dha.py                         61
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py                 1
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py             433
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py         110
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py            1
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py   153
-rw-r--r--  fuel/deploy/cloud_deploy/ssh_client.py                                   56
19 files changed, 1671 insertions, 0 deletions
diff --git a/fuel/deploy/cloud_deploy/__init__.py b/fuel/deploy/cloud_deploy/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/cloud/__init__.py b/fuel/deploy/cloud_deploy/cloud/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/cloud/common.py b/fuel/deploy/cloud_deploy/cloud/common.py
new file mode 100644
index 0000000..365f6fb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/common.py
@@ -0,0 +1,51 @@
+import subprocess
+import sys
+import os
+import logging
+
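+# Column index maps for parsing table output from the 'fuel' CLI
+# (N: nodes, E: environments, R: releases, RO: roles).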
+N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5,
+ 'roles': 6, 'pending_roles': 7, 'online': 8}
+E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4,
+ 'changes': 5, 'pending_release_id': 6}
+R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4}
+RO = {'name': 0, 'conflicts': 1}
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+formatter = logging.Formatter('%(message)s')
+out_handler = logging.StreamHandler(sys.stdout)
+out_handler.setFormatter(formatter)
+LOG.addHandler(out_handler)
+out_handler = logging.FileHandler('autodeploy.log', mode='w')
+out_handler.setFormatter(formatter)
+LOG.addHandler(out_handler)
+
+def exec_cmd(cmd):
+ process = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ shell=True)
+ return process.communicate()[0], process.returncode
+
+def run_proc(cmd):
+ process = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ shell=True)
+ return process
+
+def parse(printout, *args):
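+    # Split the table output of a 'fuel' CLI command into a list of rows,
+    # skipping the two header lines and stripping each cell.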
+ parsed_list = []
+ lines = printout[0].splitlines()
+ for l in lines[2:]:
+ parsed = [e.strip() for e in l.split('|')]
+ parsed_list.append(parsed)
+ return parsed_list
+
+def err(error_message):
+ LOG.error(error_message)
+ sys.exit(1)
+
+def check_file_exists(file_path):
+ if not os.path.isfile(file_path):
+ err('ERROR: File %s not found\n' % file_path)
diff --git a/fuel/deploy/cloud_deploy/cloud/configure_environment.py b/fuel/deploy/cloud_deploy/cloud/configure_environment.py
new file mode 100644
index 0000000..426bbd1
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/configure_environment.py
@@ -0,0 +1,74 @@
+import common
+import os
+import shutil
+
+from configure_settings import ConfigureSettings
+from configure_network import ConfigureNetwork
+from configure_nodes import ConfigureNodes
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+LOG = common.LOG
+
+class ConfigureEnvironment(object):
+
+ def __init__(self, dea, yaml_config_dir, release_id, node_id_roles_dict):
+ self.env_id = None
+ self.dea = dea
+ self.yaml_config_dir = yaml_config_dir
+ self.env_name = dea.get_environment_name()
+ self.release_id = release_id
+ self.node_id_roles_dict = node_id_roles_dict
+ self.required_networks = []
+
+ def env_exists(self, env_name):
+ env_list = parse(exec_cmd('fuel env --list'))
+ for env in env_list:
+ if env[E['name']] == env_name and env[E['status']] == 'new':
+ self.env_id = env[E['id']]
+ return True
+ return False
+
+ def configure_environment(self):
+ LOG.debug('Configure environment\n')
+ if os.path.exists(self.yaml_config_dir):
+ LOG.debug('Deleting existing config directory %s\n'
+ % self.yaml_config_dir)
+ shutil.rmtree(self.yaml_config_dir)
+ LOG.debug('Creating new config directory %s\n' % self.yaml_config_dir)
+ os.makedirs(self.yaml_config_dir)
+
+ LOG.debug('Creating environment %s release %s, mode ha, network-mode '
+ 'neutron, net-segment-type vlan\n'
+ % (self.env_name, self.release_id))
+ exec_cmd('fuel env create --name %s --release %s --mode ha '
+ '--network-mode neutron --net-segment-type vlan'
+ % (self.env_name, self.release_id))
+
+ if not self.env_exists(self.env_name):
+ err("Failed to create environment %s\n" % self.env_name)
+ self.config_settings()
+ self.config_network()
+ self.config_nodes()
+
+ def config_settings(self):
+ settings = ConfigureSettings(self.yaml_config_dir, self.env_id,
+ self.dea)
+ settings.config_settings()
+
+ def config_network(self):
+ network = ConfigureNetwork(self.yaml_config_dir, self.env_id, self.dea)
+ network.config_network()
+
+ def config_nodes(self):
+ nodes = ConfigureNodes(self.yaml_config_dir, self.env_id,
+ self.node_id_roles_dict, self.dea)
+ nodes.config_nodes()
+
+
+
diff --git a/fuel/deploy/cloud_deploy/cloud/configure_network.py b/fuel/deploy/cloud_deploy/cloud/configure_network.py
new file mode 100644
index 0000000..f4d6f87
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/configure_network.py
@@ -0,0 +1,62 @@
+import common
+import yaml
+import io
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+class ConfigureNetwork(object):
+
+ def __init__(self, yaml_config_dir, env_id, dea):
+ self.yaml_config_dir = yaml_config_dir
+ self.env_id = env_id
+ self.dea = dea
+ self.required_networks = []
+
+ def download_network_config(self):
+ LOG.debug('Download network config for environment %s\n' % self.env_id)
+ exec_cmd('fuel network --env %s --download --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def upload_network_config(self):
+ LOG.debug('Upload network config for environment %s\n' % self.env_id)
+ exec_cmd('fuel network --env %s --upload --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def config_network(self):
+ LOG.debug('Configure network\n')
+ self.download_network_config()
+ self.modify_network_config()
+ self.upload_network_config()
+
+ def modify_network_config(self):
+ LOG.debug('Modify network config for environment %s\n' % self.env_id)
+ network_yaml = (self.yaml_config_dir + '/network_%s.yaml'
+ % self.env_id)
+ check_file_exists(network_yaml)
+
+ network_config = self.dea.get_networks()
+
+ with io.open(network_yaml) as stream:
+ network = yaml.load(stream)
+
+ net_names = self.dea.get_network_names()
+ net_id = {}
+ for net in network['networks']:
+ if net['name'] in net_names:
+ net_id[net['name']] = {'id': net['id'],
+ 'group_id': net['group_id']}
+
+ for network in network_config['networks']:
+ network.update(net_id[network['name']])
+
+ with io.open(network_yaml, 'w') as stream:
+        yaml.dump(network_config, stream, default_flow_style=False)
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/configure_nodes.py b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py
new file mode 100644
index 0000000..a5e24a8
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py
@@ -0,0 +1,108 @@
+import common
+import yaml
+import io
+import glob
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+
+class ConfigureNodes(object):
+
+ def __init__(self, yaml_config_dir, env_id, node_id_roles_dict, dea):
+ self.yaml_config_dir = yaml_config_dir
+ self.env_id = env_id
+ self.node_id_roles_dict = node_id_roles_dict
+ self.dea = dea
+
+ def config_nodes(self):
+ LOG.debug('Configure nodes\n')
+ for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems():
+ exec_cmd('fuel node set --node-id %s --role %s --env %s'
+ % (node_id, ','.join(roles_shelf_blade[0]), self.env_id))
+
+ self.download_deployment_config()
+ self.modify_node_network_schemes()
+ self.upload_deployment_config()
+
+ for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems():
+ self.download_interface_config(node_id)
+ self.modify_node_interface(node_id)
+ self.upload_interface_config(node_id)
+
+ def modify_node_network_schemes(self):
+ LOG.debug('Modify node network schemes in environment %s\n' % self.env_id)
+ for node_file in glob.glob('%s/deployment_%s/*.yaml'
+ % (self.yaml_config_dir, self.env_id)):
+ check_file_exists(node_file)
+
+ if 'compute' in node_file:
+ node_type = 'compute'
+ else:
+ node_type = 'controller'
+
+ network_scheme = self.dea.get_network_scheme(node_type)
+
+ with io.open(node_file) as stream:
+ node = yaml.load(stream)
+
+ node['network_scheme']['transformations'] = network_scheme
+
+ with io.open(node_file, 'w') as stream:
+ yaml.dump(node, stream, default_flow_style=False)
+
+
+ def download_deployment_config(self):
+ LOG.debug('Download deployment config for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel deployment --env %s --default --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def upload_deployment_config(self):
+ LOG.debug('Upload deployment config for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel deployment --env %s --upload --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def download_interface_config(self, node_id):
+ LOG.debug('Download interface config for node %s\n' % node_id)
+ r, c = exec_cmd('fuel node --env %s --node %s --network --download '
+ '--dir %s' % (self.env_id, node_id,
+ self.yaml_config_dir))
+
+ def upload_interface_config(self, node_id):
+ LOG.debug('Upload interface config for node %s\n' % node_id)
+ r, c = exec_cmd('fuel node --env %s --node %s --network --upload '
+ '--dir %s' % (self.env_id, node_id,
+ self.yaml_config_dir))
+
+ def modify_node_interface(self, node_id):
+ LOG.debug('Modify interface config for node %s\n' % node_id)
+ interface_yaml = (self.yaml_config_dir + '/node_%s/interfaces.yaml'
+ % node_id)
+
+ with io.open(interface_yaml) as stream:
+ interfaces = yaml.load(stream)
+
+ net_name_id = {}
+ for interface in interfaces:
+ for network in interface['assigned_networks']:
+ net_name_id[network['name']] = network['id']
+
+ interface_config = self.dea.get_interfaces()
+
+ for interface in interfaces:
+ interface['assigned_networks'] = []
+ for net_name in interface_config[interface['name']]:
+ net = {}
+ net['id'] = net_name_id[net_name]
+ net['name'] = net_name
+ interface['assigned_networks'].append(net)
+
+ with io.open(interface_yaml, 'w') as stream:
+        yaml.dump(interfaces, stream, default_flow_style=False)
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/configure_settings.py b/fuel/deploy/cloud_deploy/cloud/configure_settings.py
new file mode 100644
index 0000000..3a3e4d5
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/configure_settings.py
@@ -0,0 +1,47 @@
+import common
+import yaml
+import io
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+class ConfigureSettings(object):
+
+ def __init__(self, yaml_config_dir, env_id, dea):
+ self.yaml_config_dir = yaml_config_dir
+ self.env_id = env_id
+ self.dea = dea
+
+ def download_settings(self):
+ LOG.debug('Download settings for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel settings --env %s --download --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def upload_settings(self):
+ LOG.debug('Upload settings for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel settings --env %s --upload --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def config_settings(self):
+ LOG.debug('Configure settings\n')
+ self.download_settings()
+ self.modify_settings()
+ self.upload_settings()
+
+ def modify_settings(self):
+ LOG.debug('Modify settings for environment %s\n' % self.env_id)
+ settings_yaml = (self.yaml_config_dir + '/settings_%s.yaml'
+ % self.env_id)
+ check_file_exists(settings_yaml)
+
+ settings = self.dea.get_settings()
+
+ with io.open(settings_yaml, 'w') as stream:
+ yaml.dump(settings, stream, default_flow_style=False)
diff --git a/fuel/deploy/cloud_deploy/cloud/dea.py b/fuel/deploy/cloud_deploy/cloud/dea.py
new file mode 100644
index 0000000..295636a
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/dea.py
@@ -0,0 +1,86 @@
+import yaml
+import io
+
+class DeploymentEnvironmentAdapter(object):
+ def __init__(self):
+ self.dea_struct = None
+ self.blade_ids_per_shelves = {}
+ self.blades_per_shelves = {}
+ self.shelf_ids = []
+ self.info_per_shelves = {}
+ self.network_names = []
+
+ def parse_yaml(self, yaml_path):
+ with io.open(yaml_path) as yaml_file:
+ self.dea_struct = yaml.load(yaml_file)
+ self.collect_shelf_and_blade_info()
+ self.collect_shelf_info()
+ self.collect_network_names()
+
+ def get_no_of_blades(self):
+ no_of_blades = 0
+ for shelf in self.dea_struct['shelf']:
+ no_of_blades += len(shelf['blade'])
+ return no_of_blades
+
+ def collect_shelf_info(self):
+ self.info_per_shelves = {}
+ for shelf in self.dea_struct['shelf']:
+ self.info_per_shelves[shelf['id']] = shelf
+
+ def get_shelf_info(self, shelf):
+ return (self.info_per_shelves[shelf]['type'],
+ self.info_per_shelves[shelf]['mgmt_ip'],
+ self.info_per_shelves[shelf]['username'],
+ self.info_per_shelves[shelf]['password'])
+
+ def get_environment_name(self):
+ return self.dea_struct['name']
+
+ def get_shelf_ids(self):
+ return self.shelf_ids
+
+ def get_blade_ids_per_shelf(self, shelf_id):
+ return self.blade_ids_per_shelves[shelf_id]
+
+ def get_blade_ids_per_shelves(self):
+ return self.blade_ids_per_shelves
+
+ def collect_shelf_and_blade_info(self):
+ self.blade_ids_per_shelves = {}
+ self.blades_per_shelves = {}
+ self.shelf_ids = []
+ for shelf in self.dea_struct['shelf']:
+ self.shelf_ids.append(shelf['id'])
+ blade_ids = self.blade_ids_per_shelves[shelf['id']] = []
+ blades = self.blades_per_shelves[shelf['id']] = {}
+ for blade in shelf['blade']:
+ blade_ids.append(blade['id'])
+ blades[blade['id']] = blade
+
+ def has_role(self, role, shelf, blade):
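+        # Blades with no explicit 'roles' entry in the DEA are treated as
+        # compute nodes.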
+ blade = self.blades_per_shelves[shelf][blade]
+        if role == 'compute':
+            return 'roles' not in blade
+        return 'roles' in blade and role in blade['roles']
+
+ def collect_network_names(self):
+ self.network_names = []
+ for network in self.dea_struct['networks']['networks']:
+ self.network_names.append(network['name'])
+
+ def get_networks(self):
+ return self.dea_struct['networks']
+
+ def get_network_names(self):
+ return self.network_names
+
+ def get_settings(self):
+ return self.dea_struct['settings']
+
+ def get_network_scheme(self, node_type):
+ return self.dea_struct[node_type]
+
+ def get_interfaces(self):
+        return self.dea_struct['interfaces']
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/deploy.py b/fuel/deploy/cloud_deploy/cloud/deploy.py
new file mode 100644
index 0000000..ea33f8b
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/deploy.py
@@ -0,0 +1,208 @@
+import time
+import yaml
+import io
+import os
+
+import common
+from dea import DeploymentEnvironmentAdapter
+from configure_environment import ConfigureEnvironment
+from deployment import Deployment
+
+SUPPORTED_RELEASE = 'Juno on CentOS 6.5'
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+class Deploy(object):
+
+ def __init__(self, yaml_config_dir):
+ self.supported_release = None
+ self.yaml_config_dir = yaml_config_dir
+ self.macs_per_shelf_dict = {}
+ self.node_ids_dict = {}
+ self.node_id_roles_dict = {}
+ self.env_id = None
+ self.shelf_blades_dict = {}
+
+ def cleanup_fuel_environments(self, env_list):
+ WAIT_LOOP = 60
+ SLEEP_TIME = 10
+ for env in env_list:
+ LOG.debug('Deleting environment %s\n' % env[E['id']])
+ exec_cmd('fuel env --env %s --delete' % env[E['id']])
+ all_env_erased = False
+ for i in range(WAIT_LOOP):
+ env_list = parse(exec_cmd('fuel env list'))
+ if env_list[0][0]:
+ time.sleep(SLEEP_TIME)
+ else:
+ all_env_erased = True
+ break
+ if not all_env_erased:
+ err('Could not erase these environments %s'
+ % [(env[E['id']], env[E['status']]) for env in env_list])
+
+ def cleanup_fuel_nodes(self, node_list):
+ for node in node_list:
+ if node[N['status']] == 'discover':
+ LOG.debug('Deleting node %s\n' % node[N['id']])
+ exec_cmd('fuel node --node-id %s --delete-from-db'
+ % node[N['id']])
+ exec_cmd('cobbler system remove --name node-%s'
+ % node[N['id']])
+
+ def check_previous_installation(self):
+ LOG.debug('Check previous installation\n')
+ env_list = parse(exec_cmd('fuel env list'))
+ if env_list[0][0]:
+ self.cleanup_fuel_environments(env_list)
+ node_list = parse(exec_cmd('fuel node list'))
+ if node_list[0][0]:
+ self.cleanup_fuel_nodes(node_list)
+
+ def check_supported_release(self):
+ LOG.debug('Check supported release: %s\n' % SUPPORTED_RELEASE)
+ release_list = parse(exec_cmd('fuel release -l'))
+ for release in release_list:
+ if release[R['name']] == SUPPORTED_RELEASE:
+ self.supported_release = release
+ break
+ if not self.supported_release:
+ err('This Fuel does not contain the following '
+ 'release: %s\n' % SUPPORTED_RELEASE)
+
+ def check_prerequisites(self):
+ LOG.debug('Check prerequisites\n')
+ self.check_supported_release()
+ self.check_previous_installation()
+
+ def find_mac_in_dict(self, mac):
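+        # Return the (shelf, blade) that owns this MAC address, or None if
+        # the MAC is unknown.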
+ for shelf, blade_dict in self.macs_per_shelf_dict.iteritems():
+ for blade, mac_list in blade_dict.iteritems():
+ if mac in mac_list:
+ return shelf, blade
+
+ def all_blades_discovered(self):
+ for shelf, blade_dict in self.node_ids_dict.iteritems():
+ for blade, node_id in blade_dict.iteritems():
+ if not node_id:
+ return False
+ return True
+
+ def not_discovered_blades_summary(self):
+ summary = ''
+ for shelf, blade_dict in self.node_ids_dict.iteritems():
+ for blade, node_id in blade_dict.iteritems():
+ if not node_id:
+ summary += '[shelf %s, blade %s]\n' % (shelf, blade)
+ return summary
+
+ def collect_blade_ids_per_shelves(self, dea):
+ self.shelf_blades_dict = dea.get_blade_ids_per_shelves()
+
+ def node_discovery(self, node_list, discovered_macs):
+ for node in node_list:
+ if (node[N['status']] == 'discover' and
+ node[N['online']] == 'True' and
+ node[N['mac']] not in discovered_macs):
+ discovered_macs.append(node[N['mac']])
+ shelf_blade = self.find_mac_in_dict(node[N['mac']])
+ if shelf_blade:
+ self.node_ids_dict[shelf_blade[0]][shelf_blade[1]] = \
+ node[N['id']]
+
+ def discovery_waiting_loop(self, discovered_macs):
+ WAIT_LOOP = 180
+ SLEEP_TIME = 10
+ all_discovered = False
+ for i in range(WAIT_LOOP):
+ node_list = parse(exec_cmd('fuel node list'))
+ if node_list[0][0]:
+ self.node_discovery(node_list, discovered_macs)
+ if self.all_blades_discovered():
+ all_discovered = True
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+ return all_discovered
+
+ def wait_for_discovered_blades(self):
+ LOG.debug('Wait for discovered blades\n')
+ discovered_macs = []
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ self.node_ids_dict[shelf] = {}
+ for blade in blade_list:
+ self.node_ids_dict[shelf][blade] = None
+ all_discovered = self.discovery_waiting_loop(discovered_macs)
+ if not all_discovered:
+ err('Not all blades have been discovered: %s\n'
+ % self.not_discovered_blades_summary())
+
+ def get_mac_addresses(self, macs_yaml):
+ with io.open(macs_yaml, 'r') as stream:
+ self.macs_per_shelf_dict = yaml.load(stream)
+
+ def assign_roles_to_cluster_node_ids(self, dea):
+ self.node_id_roles_dict = {}
+ for shelf, blades_dict in self.node_ids_dict.iteritems():
+ for blade, node_id in blades_dict.iteritems():
+ role_list = []
+ if dea.has_role('controller', shelf, blade):
+ role_list.extend(['controller', 'mongo'])
+ if dea.has_role('cinder', shelf, blade):
+ role_list.extend(['cinder'])
+ elif dea.has_role('compute', shelf, blade):
+ role_list.extend(['compute'])
+ self.node_id_roles_dict[node_id] = (role_list, shelf, blade)
+
+ def configure_environment(self, dea):
+ config_env = ConfigureEnvironment(dea, self.yaml_config_dir,
+ self.supported_release[R['id']],
+ self.node_id_roles_dict)
+ config_env.configure_environment()
+ self.env_id = config_env.env_id
+
+ def deploy(self, dea):
+ dep = Deployment(dea, self.yaml_config_dir, self.env_id,
+ self.node_id_roles_dict)
+ dep.deploy()
+
+
+def main():
+
+ base_dir = os.path.dirname(os.path.realpath(__file__))
+ dea_yaml = base_dir + '/dea.yaml'
+ check_file_exists(dea_yaml)
+ macs_yaml = base_dir + '/macs.yaml'
+ check_file_exists(macs_yaml)
+
+ yaml_config_dir = '/var/lib/opnfv/pre_deploy'
+
+ deploy = Deploy(yaml_config_dir)
+ dea = DeploymentEnvironmentAdapter()
+ dea.parse_yaml(dea_yaml)
+
+ deploy.get_mac_addresses(macs_yaml)
+
+ deploy.collect_blade_ids_per_shelves(dea)
+
+ deploy.check_prerequisites()
+
+ deploy.wait_for_discovered_blades()
+
+ deploy.assign_roles_to_cluster_node_ids(dea)
+
+ deploy.configure_environment(dea)
+
+ deploy.deploy(dea)
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/deployment.py b/fuel/deploy/cloud_deploy/cloud/deployment.py
new file mode 100644
index 0000000..831059b
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/deployment.py
@@ -0,0 +1,100 @@
+import common
+import os
+import shutil
+import glob
+import yaml
+import io
+import time
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+run_proc = common.run_proc
+parse = common.parse
+err = common.err
+LOG = common.LOG
+
+
+class Deployment(object):
+
+ def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict):
+ self.dea = dea
+ self.env_name = dea.get_environment_name()
+ self.yaml_config_dir = yaml_config_dir
+ self.env_id = env_id
+ self.node_id_roles_dict = node_id_roles_dict
+ self.node_id_list = []
+ for node_id in self.node_id_roles_dict.iterkeys():
+ self.node_id_list.append(node_id)
+ self.node_id_list.sort()
+
+ def download_deployment_info(self):
+ LOG.debug('Download deployment info for environment %s\n' % self.env_id)
+ deployment_dir = self.yaml_config_dir + '/deployment_%s' % self.env_id
+ if os.path.exists(deployment_dir):
+ shutil.rmtree(deployment_dir)
+ r, c = exec_cmd('fuel --env %s deployment --default --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+ if c > 0:
+ err('Error: Could not download deployment info for env %s,'
+ ' reason: %s\n' % (self.env_id, r))
+
+ def upload_deployment_info(self):
+ LOG.debug('Upload deployment info for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel --env %s deployment --upload --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+ if c > 0:
+ err('Error: Could not upload deployment info for env %s,'
+ ' reason: %s\n' % (self.env_id, r))
+
+ def pre_deploy(self):
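+        # Make sure every node's deployment info contains an 'opnfv' section
+        # before the deployment is started.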
+ LOG.debug('Running pre-deploy on environment %s\n' % self.env_name)
+ self.download_deployment_info()
+ opnfv = {'opnfv': {}}
+
+ for node_file in glob.glob('%s/deployment_%s/*.yaml'
+ % (self.yaml_config_dir, self.env_id)):
+ with io.open(node_file) as stream:
+ node = yaml.load(stream)
+
+ if 'opnfv' not in node:
+ node.update(opnfv)
+
+ with io.open(node_file, 'w') as stream:
+ yaml.dump(node, stream, default_flow_style=False)
+ self.upload_deployment_info()
+
+
+ def deploy(self):
+ WAIT_LOOP = 180
+ SLEEP_TIME = 60
+
+ self.pre_deploy()
+
+ log_file = 'cloud.log'
+
+ LOG.debug('Starting deployment of environment %s\n' % self.env_name)
+ run_proc('fuel --env %s deploy-changes | strings | tee %s'
+ % (self.env_id, log_file))
+
+ ready = False
+ for i in range(WAIT_LOOP):
+ env = parse(exec_cmd('fuel env --env %s' % self.env_id))
+ LOG.debug('Environment status: %s\n' % env[0][E['status']])
+ r, _ = exec_cmd('tail -2 %s | head -1' % log_file)
+ if r:
+ LOG.debug('%s\n' % r)
+ if env[0][E['status']] == 'operational':
+ ready = True
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+ exec_cmd('rm %s' % log_file)
+
+ if ready:
+ LOG.debug('Environment %s successfully deployed\n' % self.env_name)
+ else:
+ err('Deployment failed, environment %s is not operational\n'
+ % self.env_name)
diff --git a/fuel/deploy/cloud_deploy/cloud_deploy.py b/fuel/deploy/cloud_deploy/cloud_deploy.py
new file mode 100644
index 0000000..4197519
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud_deploy.py
@@ -0,0 +1,117 @@
+import os
+import io
+import yaml
+
+from cloud import common
+from cloud.dea import DeploymentEnvironmentAdapter
+from hardware_adapters.dha import DeploymentHardwareAdapter
+from ssh_client import SSHClient
+
+exec_cmd = common.exec_cmd
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+class CloudDeploy(object):
+
+ def __init__(self, fuel_ip, fuel_username, fuel_password):
+ self.fuel_ip = fuel_ip
+ self.fuel_username = fuel_username
+ self.fuel_password = fuel_password
+ self.shelf_blades_dict = {}
+ self.macs_per_shelf_dict = {}
+
+    def copy_to_fuel_master(self, dir_path=None, file_path=None, target='~'):
+        path = None
+        if dir_path:
+            path = '-r ' + dir_path
+        elif file_path:
+            path = file_path
+        LOG.debug('Copying %s to Fuel Master %s' % (path, target))
+        if path:
+ exec_cmd('sshpass -p %s scp -o UserKnownHostsFile=/dev/null'
+ ' -o StrictHostKeyChecking=no -o ConnectTimeout=15'
+ ' %s %s@%s:%s'
+ % (self.fuel_password, path, self.fuel_username,
+ self.fuel_ip, target))
+
+ def run_cloud_deploy(self, deploy_dir, deploy_app):
+ LOG.debug('START CLOUD DEPLOYMENT')
+ ssh = SSHClient(self.fuel_ip, self.fuel_username, self.fuel_password)
+ ssh.open()
+ ssh.run('python %s/%s' % (deploy_dir, deploy_app))
+ ssh.close()
+
+ def power_off_blades(self, dea):
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+ dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
+ dha.power_off_blades(shelf, blade_list)
+
+ def power_on_blades(self, dea):
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+ dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
+ dha.power_on_blades(shelf, blade_list)
+
+ def set_boot_order(self, dea):
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+ dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
+ dha.set_boot_order_blades(shelf, blade_list)
+
+ def get_mac_addresses(self, dea, macs_yaml):
+ self.macs_per_shelf_dict = {}
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+ dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
+ self.macs_per_shelf_dict[shelf] = dha.get_blades_mac_addresses(
+ shelf, blade_list)
+
+ with io.open(macs_yaml, 'w') as stream:
+ yaml.dump(self.macs_per_shelf_dict, stream,
+ default_flow_style=False)
+
+ def collect_blade_ids_per_shelves(self, dea):
+ self.shelf_blades_dict = dea.get_blade_ids_per_shelves()
+
+
+
+def main():
+
+ fuel_ip = '10.20.0.2'
+ fuel_username = 'root'
+ fuel_password = 'r00tme'
+ deploy_dir = '~/cloud'
+
+ cloud = CloudDeploy(fuel_ip, fuel_username, fuel_password)
+
+ base_dir = os.path.dirname(os.path.realpath(__file__))
+ deployment_dir = base_dir + '/cloud'
+ macs_yaml = base_dir + '/macs.yaml'
+ dea_yaml = base_dir + '/dea.yaml'
+ check_file_exists(dea_yaml)
+
+ cloud.copy_to_fuel_master(dir_path=deployment_dir)
+ cloud.copy_to_fuel_master(file_path=dea_yaml, target=deploy_dir)
+
+ dea = DeploymentEnvironmentAdapter()
+ dea.parse_yaml(dea_yaml)
+
+ cloud.collect_blade_ids_per_shelves(dea)
+
+ cloud.power_off_blades(dea)
+
+ cloud.set_boot_order(dea)
+
+ cloud.power_on_blades(dea)
+
+ cloud.get_mac_addresses(dea, macs_yaml)
+ check_file_exists(dea_yaml)
+
+ cloud.copy_to_fuel_master(file_path=macs_yaml, target=deploy_dir)
+
+ cloud.run_cloud_deploy(deploy_dir, 'deploy.py')
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/dha.py b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py
new file mode 100644
index 0000000..2764aeb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py
@@ -0,0 +1,61 @@
+from hp.hp_adapter import HpAdapter
+from libvirt.libvirt_adapter import LibvirtAdapter
+
+class DeploymentHardwareAdapter(object):
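+    # Factory: returns the hardware adapter matching the shelf 'type' given
+    # in the DEA (esxi, hp, dell or libvirt).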
+ def __new__(cls, server_type, *args):
+ if cls is DeploymentHardwareAdapter:
+ if server_type == 'esxi': return EsxiAdapter(*args)
+ if server_type == 'hp': return HpAdapter(*args)
+ if server_type == 'dell': return DellAdapter(*args)
+ if server_type == 'libvirt': return LibvirtAdapter(*args)
+ return super(DeploymentHardwareAdapter, cls).__new__(cls)
+
+
+class HardwareAdapter(object):
+
+ def power_off_blades(self, shelf, blade_list):
+ raise NotImplementedError
+
+ def power_off_blade(self, shelf, blade):
+ raise NotImplementedError
+
+ def power_on_blades(self, shelf, blade_list):
+ raise NotImplementedError
+
+ def power_on_blade(self, shelf, blade):
+ raise NotImplementedError
+
+ def power_cycle_blade(self):
+ raise NotImplementedError
+
+ def set_boot_order_blades(self, shelf, blade_list):
+ raise NotImplementedError
+
+ def set_boot_order_blade(self, shelf, blade):
+ raise NotImplementedError
+
+ def reset_to_factory_defaults(self):
+ raise NotImplementedError
+
+ def configure_networking(self):
+ raise NotImplementedError
+
+ def get_blade_mac_addresses(self, shelf, blade):
+ raise NotImplementedError
+
+ def get_hardware_info(self, shelf, blade):
+ raise NotImplementedError
+
+
+class EsxiAdapter(HardwareAdapter):
+
+ def __init__(self):
+ self.environment = {1: {1: {'mac': ['00:50:56:8c:05:85']},
+ 2: {'mac': ['00:50:56:8c:21:92']}}}
+
+ def get_blade_mac_addresses(self, shelf, blade):
+ return self.environment[shelf][blade]['mac']
+
+
+class DellAdapter(HardwareAdapter):
+ pass
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py
new file mode 100644
index 0000000..916d4dc
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py
@@ -0,0 +1,433 @@
+import re
+import time
+from netaddr import EUI, mac_unix
+from cloud import common
+
+from run_oa_command import RunOACommand
+
+LOG = common.LOG
+
+class HpAdapter(object):
+
+ # Exception thrown at any kind of failure to get the requested
+ # information.
+ class NoInfoFoundError(Exception):
+ pass
+
+ # Totally failed to connect so a re-try with other HW should
+ # be done. This exception should never escape this class.
+ class InternalConnectError(Exception):
+ pass
+
+ # Format MAC so leading zeroes are displayed
+ class mac_dhcp(mac_unix):
+ word_fmt = "%.2x"
+
+ def __init__(self, mgmt_ip, username, password):
+ self.mgmt_ip = mgmt_ip
+ self.username = username
+ self.password = password
+ self.oa_error_message = ''
+
+ def get_blade_mac_addresses(self, shelf, blade):
+
+ LOG.debug("Entering: get_mac_addr_hp(%d,%d)" % (shelf, blade))
+ self.oa_error_message = ''
+ oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+ LOG.debug("Connect to active OA for shelf %d" % shelf)
+ try:
+ res = oa.connect_to_active()
+ except:
+ raise self.InternalConnectError(oa.error_message)
+ if res is None:
+ raise self.InternalConnectError(oa.error_message)
+ if not oa.connected():
+ raise self.NoInfoFoundError(oa.error_message)
+
+ cmd = ("show server info " + str(blade))
+
+ LOG.debug("Send command to OA: %s" % cmd)
+ try:
+ serverinfo = oa.send_command(cmd)
+ except:
+ raise self.NoInfoFoundError(oa.error_message)
+ finally:
+ oa.close()
+
+ (left, right) = self.find_mac(serverinfo, shelf, blade)
+
+ left = EUI(left, dialect=self.mac_dhcp)
+ right = EUI(right, dialect=self.mac_dhcp)
+ return [str(left), str(right)]
+
+ def get_blades_mac_addresses(self, shelf, blade_list):
+ macs_per_blade_dict = {}
+ LOG.debug("Getting MAC addresses for shelf %s, blades %s"
+ % (shelf, blade_list))
+ self.oa_error_message = ''
+ oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+ LOG.debug("Connect to active OA for shelf %d" % shelf)
+ try:
+ res = oa.connect_to_active()
+ except:
+ raise self.InternalConnectError(oa.error_message)
+ if res is None:
+ raise self.InternalConnectError(oa.error_message)
+ if not oa.connected():
+ raise self.NoInfoFoundError(oa.error_message)
+ try:
+ for blade in blade_list:
+                cmd = ("show server info %s" % blade)
+                LOG.debug("Send command to OA: %s" % cmd)
+ printout = oa.send_command(cmd)
+ left, right = self.find_mac(printout, shelf, blade)
+ left = EUI(left, dialect=self.mac_dhcp)
+ right = EUI(right, dialect=self.mac_dhcp)
+ macs_per_blade_dict[blade] = [str(left), str(right)]
+ except:
+ raise self.NoInfoFoundError(oa.error_message)
+ finally:
+ oa.close()
+ return macs_per_blade_dict
+
+ def get_blade_hardware_info(self, shelf, blade=None):
+ if blade:
+ LOG.debug("Entering: get_hp_info(%d,%d)" % (shelf, blade))
+ else:
+ LOG.debug("Entering: get_hp_info(%d)" % shelf)
+
+ self.oa_error_message = ''
+ oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+ LOG.debug("Connect to active OA for shelf %d" % shelf)
+
+ try:
+ res = oa.connect_to_active()
+ except:
+ self.oa_error_message = oa.error_message
+ return None
+ if res is None:
+ self.oa_error_message = oa.error_message
+ return None
+ if not oa.connected():
+ self.oa_error_message = oa.error_message
+ return None
+
+        # If no blade is specified we are done; we know this is HP hardware
+        # at this point.
+ if not blade:
+ oa.close()
+ return "HP"
+
+ check = "show server info %d" % blade
+ LOG.debug("Send command to OA: %s" % check)
+ output = oa.send_command("%s" % check)
+ oa.close()
+
+ match = r"Product Name:\s+(.+)\Z"
+ if re.search(match, str(output[:])) is None:
+ self.oa_error_message = ("Blade %d in shelf %d does not exist\n"
+ % (blade, shelf))
+ return None
+
+ for line in output:
+ seobj = re.search(match, line)
+ if seobj:
+ return "HP %s" % seobj.group(1)
+ return False
+
+ def power_off_blades(self, shelf, blade_list):
+ return self.set_state(shelf, 'locked', blade_list=blade_list)
+
+ def power_on_blades(self, shelf, blade_list):
+ return self.set_state(shelf, 'unlocked', blade_list=blade_list)
+
+ def set_boot_order_blades(self, shelf, blade_list):
+ return self.set_boot_order(shelf, blade_list=blade_list)
+
+ def power_off_blade(self, shelf, blade):
+ return self.set_state(shelf, 'locked', one_blade=blade)
+
+ def power_on_blade(self, shelf, blade):
+ return self.set_state(shelf, 'unlocked', one_blade=blade)
+
+ def set_boot_order_blade(self, shelf, blade):
+ return self.set_boot_order(shelf, one_blade=blade)
+
+ # Search HP's OA server info for MAC for left and right control
+ def find_mac(self, printout, shelf, blade):
+ left = False
+ right = False
+ for line in printout:
+ if ("No Server Blade Installed" in line or
+ "Invalid Arguments" in line):
+ raise self.NoInfoFoundError("Blade %d in shelf %d "
+ "does not exist." % (blade, shelf))
+ seobj = re.search(r"LOM1:1-a\s+([0-9A-F:]+)", line, re.I)
+ if seobj:
+ left = seobj.group(1)
+ else:
+ seobj = re.search(r"LOM1:2-a\s+([0-9A-F:]+)", line, re.I)
+ if seobj:
+ right = seobj.group(1)
+ if left and right:
+ return left, right
+ raise self.NoInfoFoundError("Could not find MAC for blade %d "
+ "in shelf %d." % (blade, shelf))
+
+    # Do power on or off on all configured blades in the shelf.
+    # Return None to indicate that no connection to the OA succeeded,
+    # Return False to indicate that some connection to the OA succeeded
+    # or that there was a config error,
+    # Return True to indicate that the power state was successfully updated.
+    # state: locked, unlocked
+ def set_state(self, shelf, state, one_blade=None, blade_list=None):
+ if state not in ['locked', 'unlocked']:
+ return None
+
+ if one_blade:
+ LOG.debug("Entering: set_state_hp(%d,%s,%d)" %
+ (shelf, state, one_blade))
+ else:
+ LOG.debug("Entering: set_state_hp(%d,%s)" % (shelf, state))
+
+ self.oa_error_message = ''
+
+ oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+ LOG.debug("Connect to active OA for shelf %d" % shelf)
+
+ try:
+ res = oa.connect_to_active()
+ except:
+ self.oa_error_message = oa.error_message
+ return None
+ if res is None:
+ self.oa_error_message = oa.error_message
+ return None
+ if not oa.connected():
+ self.oa_error_message = oa.error_message
+ return False
+
+ if one_blade:
+ blades = [one_blade]
+ else:
+ blades = sorted(blade_list)
+
+ LOG.debug("Check if blades are present")
+
+ check = "show server list"
+
+ LOG.debug("Send command to OA: %s" % check)
+ output = oa.send_command(check)
+ first = True
+ bladelist = ''
+ for blade in blades:
+ prog = re.compile(r"\s+" + str(blade) + r"\s+\[Absent\]",
+ re.MULTILINE)
+ if prog.search(str(output[:])) is not None:
+ oa.close()
+ self.oa_error_message = ("Blade %d in shelf %d "
+ % (blade, shelf))
+ if one_blade:
+ self.oa_error_message += ("does not exist.\n"
+ "Set state %s not performed.\n"
+ % state)
+ else:
+ self.oa_error_message += (
+ "specified but does not exist.\nSet "
+ "state %s not performed on shelf %d\n"
+ % (state, shelf))
+ return False
+ if not first:
+ bladelist += ","
+ else:
+ first = False
+ bladelist += str(blade)
+
+ if blade_list:
+ LOG.debug("All blades present")
+
+ # Use leading upper case on On/Off so it can be reused in match
+ extra = ""
+ if state == "locked":
+ powerstate = "Off"
+ extra = "force"
+ else:
+ powerstate = "On"
+
+ cmd = "power%s server %s" % (powerstate, bladelist)
+
+ if extra != "":
+ cmd += " %s" % extra
+
+ LOG.debug("Send command to OA: %s" % cmd)
+
+ try:
+ oa.send_command(cmd)
+ except:
+ self.oa_error_message = oa.error_message
+ oa.close()
+ return False
+
+ # Check that all blades reach the state which can take some time,
+ # so re-try a couple of times
+ LOG.debug("Check if state %s successfully set" % state)
+ recheck = 2
+ while True:
+ LOG.debug("Send command to OA: %s" % check)
+ try:
+ output = oa.send_command(check)
+ except:
+ self.oa_error_message = oa.error_message
+ oa.close()
+ return False
+ for blade in blades:
+ match = (r"\s+" + str(blade) +
+ r"\s+\w+\s+\w+.\w+.\w+.\w+\s+\w+\s+%s" %
+ powerstate)
+ prog = re.compile(match, re.MULTILINE)
+ if prog.search(str(output[:])) is None:
+ recheck -= 1
+ if recheck >= 0:
+ # Re-try
+ time.sleep(3)
+ break
+ oa.close()
+                    self.oa_error_message = (
+                        "Could not set state %s on blade %d in shelf %d\n"
+                        % (state, blade, shelf))
+ for line in output:
+ self.oa_error_message += line
+ return False
+ else:
+ # state reached for all blades, exit the infinite loop
+ break
+
+ if one_blade:
+ LOG.debug("State %s successfully set on blade %d in shelf %d"
+ % (state, one_blade, shelf))
+ else:
+ LOG.debug("State %s successfully set on blades %s in shelf %d"
+ % (state, blade_list, shelf))
+ oa.close()
+ return True
+
+    # Change the boot order on all blades in the shelf.
+    # Return None to indicate that no connection to the OA succeeded,
+    # Return False to indicate that some connection to the OA succeeded
+    # or that there was a config error,
+    # Return True to indicate that the boot order was successfully changed.
+ def set_boot_order(self, shelf, one_blade=None, blade_list=None):
+
+ if one_blade:
+ LOG.debug("Entering: set_bootorder_hp(%d,%d)" % (shelf, one_blade))
+ else:
+ LOG.debug("Entering: set_bootorder_hp(%d)" % shelf)
+
+ self.oa_error_message = ''
+
+ oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+ LOG.debug("Connect to active OA for shelf %d" % shelf)
+
+ try:
+ res = oa.connect_to_active()
+ except:
+ self.oa_error_message = oa.error_message
+ return None
+ if res is None:
+ self.oa_error_message = oa.error_message
+ return None
+ if not oa.connected():
+ self.oa_error_message = oa.error_message
+ return False
+
+ if one_blade:
+ blades = [one_blade]
+ else:
+ blades = sorted(blade_list)
+
+ LOG.debug("Check if blades are present")
+
+ check = "show server list"
+
+ LOG.debug("Send command to OA: %s" % check)
+
+ output = oa.send_command(check)
+ first = True
+ bladelist = ''
+ for blade in blades:
+ prog = re.compile(r"\s+" + str(blade) + r"\s+\[Absent\]",
+ re.MULTILINE)
+ if prog.search(str(output[:])) is not None:
+ oa.close()
+ self.oa_error_message = ("Blade %d in shelf %d "
+ % (blade, shelf))
+ if one_blade:
+ self.oa_error_message += (
+ "does not exist.\nChange boot order not performed.\n")
+ else:
+ self.oa_error_message += (
+ "specified but does not exist.\n"
+ "Change boot order not performed on shelf %d\n"
+ % shelf)
+ return False
+ if not first:
+ bladelist += ','
+ else:
+ first = False
+ bladelist += str(blade)
+
+ if blade_list:
+ LOG.debug("All blades present")
+
+ # Boot origins are pushed so first set boot from hard disk, then PXE
+ # NB! If we want to support boot from SD we must add USB to the "stack"
+ cmd1 = "set server boot first hdd %s" % bladelist
+ cmd2 = "set server boot first pxe %s" % bladelist
+ for cmd in [cmd1, cmd2]:
+
+ LOG.debug("Send command to OA: %s" % cmd)
+ try:
+ output = oa.send_command(cmd)
+ except:
+ self.oa_error_message = oa.error_message
+ for line in output:
+ self.oa_error_message += line
+ oa.close()
+ return False
+
+ # Check that all blades got the correct boot order
+ # Needs updating if USB is added
+ LOG.debug("Check if boot order successfully set")
+ match = (r"^.*Boot Order\):\',\s*\'(\\t)+PXE NIC 1\',\s*\'(\\t)"
+ r"+Hard Drive")
+ prog = re.compile(match)
+ for blade in blades:
+
+ check = "show server boot %d" % blade
+
+ LOG.debug("Send command to OA: %s" % check)
+ try:
+ output = oa.send_command(check)
+ except:
+ self.oa_error_message = oa.error_message
+ oa.close()
+ return False
+ if prog.search(str(output[:])) is None:
+ oa.close()
+ self.oa_error_message = ("Failed to set boot order on blade "
+ "%d in shelf %d\n" % (blade, shelf))
+ for line in output:
+ self.oa_error_message += line
+ return False
+ LOG.debug("Boot order successfully set on blade %d in shelf %d"
+ % (blade, shelf))
+
+ if blade_list:
+ LOG.debug("Boot order successfully set on all configured blades "
+ "in shelf %d" % (shelf))
+ oa.close()
+ return True
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py
new file mode 100644
index 0000000..36fac77
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py
@@ -0,0 +1,110 @@
+import socket
+import paramiko
+
+from cloud import common
+
+LOG = common.LOG
+
+class RunOACommand:
+
+ def __init__(self, mgmt_ip, username, password):
+ self.ssh = None
+ self.mgmt_ip = mgmt_ip
+ self.username = username
+ self.password = password
+ self.error_message = ""
+
+ def connected(self):
+ return self.ssh is not None
+
+ def close(self):
+ if self.connected():
+ self.ssh.close()
+ self.ssh = None
+ self.error_message = ""
+
+ def connect(self):
+ LOG.info("Trying to connect to OA at %s" % self.mgmt_ip)
+ try:
+ self.ssh.connect(self.mgmt_ip,
+ username=self.username,
+ password=self.password,
+ look_for_keys=False,
+ allow_agent=False)
+ return True
+ except socket.error, (err, message):
+ self.error_message += ("Can not talk to OA %s: %s\n" %
+ (self.mgmt_ip, message))
+ except Exception as e:
+ self.error_message += ("Can not talk to OA %s: %s\n" %
+ (self.mgmt_ip, e.args))
+ LOG.error("Failed to connect to OA at %s" % self.mgmt_ip)
+ return False
+
+ # Return None if this most likely is not an OA
+ # False if we failed to connect to an active OA
+ # True if connected
+ def connect_to_active(self):
+ self.error_message = "OA connect failed with these errors:\n"
+
+ self.ssh = paramiko.SSHClient()
+ self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+ initial_mgmt_ip = self.mgmt_ip
+        if not self.connect():
+ octets = self.mgmt_ip.split(".")
+ self.mgmt_ip = "%s.%s.%s.%s" % (octets[0],
+ octets[1],
+ octets[2],
+ str(int(octets[3]) + 1))
+            if not self.connect():
+ self.ssh = None
+ LOG.error("Failed to connect to OA at %s (and %s)" %
+ (initial_mgmt_ip, self.mgmt_ip))
+ return None
+
+ output = self.send_command("show oa status")
+ for line in output:
+ if "Standby" in line:
+ self.ssh.close()
+ self.error_message += (
+ "%s is the standby OA, trying next OA\n" % self.mgmt_ip)
+ LOG.info("%s is the standby OA" % self.mgmt_ip)
+ if self.mgmt_ip != initial_mgmt_ip:
+ self.error_message += (
+ "Can only talk to OA %s which is the standby OA\n" %
+ self.mgmt_ip)
+ self.ssh = None
+ return False
+ else:
+ octets = self.mgmt_ip.split(".")
+ self.mgmt_ip = "%s.%s.%s.%s" % (octets[0],
+ octets[1],
+ octets[2],
+ str(int(octets[3]) + 1))
+                    if not self.connect():
+ self.ssh = None
+ return False
+ LOG.info("Connected to active OA at %s" % self.mgmt_ip)
+ self.error_message = ""
+ return True
+
+ def send_command(self, cmd):
+ if not self.connected():
+ self.error_message = (
+ "Not connected, cannot send command %s\n" % (cmd))
+            raise RuntimeError(self.error_message)
+
+ LOG.info('Sending "%s" to %s' % (cmd, self.mgmt_ip))
+ stdin, stdout, stderr = self.ssh.exec_command(cmd)
+ output = []
+ for line in stdout.read().splitlines():
+ if line != '':
+ output.append(line)
+ return output
+
+ def __exit__(self, type, value, traceback):
+ if self.connected():
+ self.close()
+        self.ssh = None
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py
new file mode 100644
index 0000000..d332e59
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py
@@ -0,0 +1,153 @@
+from lxml import etree
+from cloud import common
+from ssh_client import SSHClient
+
+exec_cmd = common.exec_cmd
+err = common.err
+LOG = common.LOG
+
+
+class LibvirtAdapter(object):
+
+ def __init__(self, mgmt_ip, username, password):
+ self.mgmt_ip = mgmt_ip
+ self.username = username
+ self.password = password
+ self.parser = etree.XMLParser(remove_blank_text=True)
+
+ def power_off_blades(self, shelf, blade_list):
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+ for blade in blade_list:
+ LOG.debug('Power off blade %s in shelf %s' % (blade, shelf))
+ vm_name = 's%s_b%s' % (shelf, blade)
+ resp = ssh.execute('virsh destroy %s' % vm_name)
+ LOG.debug('response: %s' % resp)
+ ssh.close()
+
+ def power_on_blades(self, shelf, blade_list):
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+ for blade in blade_list:
+ LOG.debug('Power on blade %s in shelf %s' % (blade, shelf))
+ vm_name = 's%s_b%s' % (shelf, blade)
+ resp = ssh.execute('virsh start %s' % vm_name)
+ LOG.debug('response: %s' % resp)
+ ssh.close()
+
+ def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None):
+ if not boot_dev_list:
+ boot_dev_list = ['network', 'hd']
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+        temp_dir = ssh.execute('mktemp -d').strip()
+ for blade in blade_list:
+ LOG.debug('Set boot order %s on blade %s in shelf %s'
+ % (boot_dev_list, blade, shelf))
+ vm_name = 's%s_b%s' % (shelf, blade)
+ resp = ssh.execute('virsh dumpxml %s' % vm_name)
+ xml_dump = etree.fromstring(resp, self.parser)
+ os = xml_dump.xpath('/domain/os')
+ for o in os:
+ for bootelem in ['boot', 'bootmenu']:
+ boot = o.xpath(bootelem)
+ for b in boot:
+ b.getparent().remove(b)
+ for dev in boot_dev_list:
+ b = etree.Element('boot')
+ b.set('dev', dev)
+ o.append(b)
+ bmenu = etree.Element('bootmenu')
+ bmenu.set('enable', 'no')
+ o.append(bmenu)
+ tree = etree.ElementTree(xml_dump)
+ xml_file = temp_dir + '/%s.xml' % vm_name
+ with open(xml_file, 'w') as f:
+ tree.write(f, pretty_print=True, xml_declaration=True)
+ ssh.execute('virsh define %s' % xml_file)
+ ssh.execute('rm -fr %s' % temp_dir)
+ ssh.close()
+
+ def get_blades_mac_addresses(self, shelf, blade_list):
+ LOG.debug('Get the MAC addresses of blades %s in shelf %s'
+ % (blade_list, shelf))
+ macs_per_blade_dict = {}
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+ for blade in blade_list:
+ vm_name = 's%s_b%s' % (shelf, blade)
+ mac_list = macs_per_blade_dict[blade] = []
+ resp = ssh.execute('virsh dumpxml %s' % vm_name)
+ xml_dump = etree.fromstring(resp)
+ interfaces = xml_dump.xpath('/domain/devices/interface')
+ for interface in interfaces:
+ macs = interface.xpath('mac')
+ for mac in macs:
+ mac_list.append(mac.get('address'))
+ ssh.close()
+ return macs_per_blade_dict
+
+ def load_image_file(self, shelf=None, blade=None, vm=None,
+ image_path=None):
+ if shelf and blade:
+ vm_name = 's%s_b%s' % (shelf, blade)
+ else:
+ vm_name = vm
+
+ LOG.debug('Load media file %s into %s '
+ % (image_path, 'vm %s' % vm if vm else 'blade %s in shelf %s'
+ % (shelf, blade)))
+
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+        temp_dir = ssh.execute('mktemp -d').strip()
+ resp = ssh.execute('virsh dumpxml %s' % vm_name)
+ xml_dump = etree.fromstring(resp)
+
+ disks = xml_dump.xpath('/domain/devices/disk')
+ for disk in disks:
+ if disk.get('device') == 'cdrom':
+ disk.set('type', 'file')
+ sources = disk.xpath('source')
+ for source in sources:
+ disk.remove(source)
+ source = etree.SubElement(disk, 'source')
+ source.set('file', image_path)
+ tree = etree.ElementTree(xml_dump)
+ xml_file = temp_dir + '/%s.xml' % vm_name
+ with open(xml_file, 'w') as f:
+ tree.write(f, pretty_print=True, xml_declaration=True)
+ ssh.execute('virsh define %s' % xml_file)
+ ssh.execute('rm -fr %s' % temp_dir)
+ ssh.close()
+
+ def eject_image_file(self, shelf=None, blade=None, vm=None):
+ if shelf and blade:
+ vm_name = 's%s_b%s' % (shelf, blade)
+ else:
+ vm_name = vm
+
+        LOG.debug('Eject media file from %s '
+                  % ('vm %s' % vm if vm else
+                     'blade %s in shelf %s' % (shelf, blade)))
+
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+        temp_dir = ssh.execute('mktemp -d').strip()
+ resp = ssh.execute('virsh dumpxml %s' % vm_name)
+ xml_dump = etree.fromstring(resp)
+
+ disks = xml_dump.xpath('/domain/devices/disk')
+ for disk in disks:
+ if disk.get('device') == 'cdrom':
+ disk.set('type', 'block')
+ sources = disk.xpath('source')
+ for source in sources:
+ disk.remove(source)
+ tree = etree.ElementTree(xml_dump)
+ xml_file = temp_dir + '/%s.xml' % vm_name
+ with open(xml_file, 'w') as f:
+ tree.write(f, pretty_print=True, xml_declaration=True)
+ ssh.execute('virsh define %s' % xml_file)
+ ssh.execute('rm -fr %s' % temp_dir)
+ ssh.close()
diff --git a/fuel/deploy/cloud_deploy/ssh_client.py b/fuel/deploy/cloud_deploy/ssh_client.py
new file mode 100644
index 0000000..b9aad6c
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/ssh_client.py
@@ -0,0 +1,56 @@
+import paramiko
+from cloud import common
+
+TIMEOUT = 600
+LOG = common.LOG
+
+class SSHClient(object):
+
+ def __init__(self, host, username, password):
+ self.host = host
+ self.username = username
+ self.password = password
+ self.client = None
+
+ def open(self, timeout=TIMEOUT):
+ self.client = paramiko.SSHClient()
+ self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self.client.connect(self.host, username=self.username,
+ password=self.password, timeout=timeout)
+
+ def close(self):
+ if self.client is not None:
+ self.client.close()
+ self.client = None
+
+ def execute(self, command, sudo=False, timeout=TIMEOUT):
+ if sudo and self.username != 'root':
+ command = "sudo -S -p '' %s" % command
+ stdin, stdout, stderr = self.client.exec_command(command,
+ timeout=timeout)
+ if sudo:
+ stdin.write(self.password + '\n')
+ stdin.flush()
+ return ''.join(''.join(stderr.readlines()) +
+ ''.join(stdout.readlines()))
+
+ def run(self, command):
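+        # Run the command and stream its stdout/stderr until the remote
+        # process exits.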
+ transport = self.client.get_transport()
+ transport.set_keepalive(1)
+ chan = transport.open_session()
+ chan.exec_command(command)
+
+ while not chan.exit_status_ready():
+ if chan.recv_ready():
+ data = chan.recv(1024)
+ while data:
+ print data
+ data = chan.recv(1024)
+
+ if chan.recv_stderr_ready():
+ error_buff = chan.recv_stderr(1024)
+ while error_buff:
+ print error_buff
+ error_buff = chan.recv_stderr(1024)
+ exit_status = chan.recv_exit_status()
+        LOG.debug('Exit status %s' % exit_status)
\ No newline at end of file