Diffstat (limited to 'fuel/deploy/cloud_deploy')
18 files changed, 0 insertions, 1416 deletions
diff --git a/fuel/deploy/cloud_deploy/__init__.py b/fuel/deploy/cloud_deploy/__init__.py deleted file mode 100644 index c274feb..0000000 --- a/fuel/deploy/cloud_deploy/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/cloud/__init__.py b/fuel/deploy/cloud_deploy/cloud/__init__.py deleted file mode 100644 index c274feb..0000000 --- a/fuel/deploy/cloud_deploy/cloud/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/cloud/common.py b/fuel/deploy/cloud_deploy/cloud/common.py deleted file mode 100644 index 365f6fb..0000000 --- a/fuel/deploy/cloud_deploy/cloud/common.py +++ /dev/null @@ -1,51 +0,0 @@ -import subprocess -import sys -import os -import logging - -N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5, - 'roles': 6, 'pending_roles': 7, 'online': 8} -E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4, - 'changes': 5, 'pending_release_id': 6} -R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4} -RO = {'name': 0, 'conflicts': 1} - -LOG = logging.getLogger(__name__) -LOG.setLevel(logging.DEBUG) -formatter = logging.Formatter('%(message)s') -out_handler = logging.StreamHandler(sys.stdout) -out_handler.setFormatter(formatter) -LOG.addHandler(out_handler) -out_handler = logging.FileHandler('autodeploy.log', mode='w') -out_handler.setFormatter(formatter) -LOG.addHandler(out_handler) - -def exec_cmd(cmd): - process = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True) - return process.communicate()[0], process.returncode - -def run_proc(cmd): - process = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True) - return process - -def parse(printout, *args): - parsed_list = [] - lines = printout[0].splitlines() - for l in lines[2:]: - parsed = [e.strip() for e in l.split('|')] - parsed_list.append(parsed) - return parsed_list - -def err(error_message): - LOG.error(error_message) - sys.exit(1) - -def check_file_exists(file_path): - if not os.path.isfile(file_path): - err('ERROR: File %s not found\n' % file_path) diff --git a/fuel/deploy/cloud_deploy/cloud/configure_environment.py b/fuel/deploy/cloud_deploy/cloud/configure_environment.py deleted file mode 100644 index 426bbd1..0000000 --- a/fuel/deploy/cloud_deploy/cloud/configure_environment.py +++ /dev/null @@ -1,74 +0,0 @@ -import common -import os -import shutil - -from configure_settings import ConfigureSettings -from configure_network import ConfigureNetwork -from configure_nodes import ConfigureNodes - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err -LOG = common.LOG - -class ConfigureEnvironment(object): - - def __init__(self, dea, yaml_config_dir, release_id, node_id_roles_dict): - self.env_id = None - self.dea = dea - self.yaml_config_dir = yaml_config_dir - self.env_name = dea.get_environment_name() - self.release_id = release_id - self.node_id_roles_dict = node_id_roles_dict - self.required_networks = [] - - def env_exists(self, env_name): - env_list = parse(exec_cmd('fuel env --list')) - for env in env_list: - if env[E['name']] == env_name and env[E['status']] == 'new': - self.env_id = env[E['id']] - return True - return False - - def configure_environment(self): - LOG.debug('Configure environment\n') - if os.path.exists(self.yaml_config_dir): - LOG.debug('Deleting existing config directory %s\n' - % self.yaml_config_dir) - 
shutil.rmtree(self.yaml_config_dir) - LOG.debug('Creating new config directory %s\n' % self.yaml_config_dir) - os.makedirs(self.yaml_config_dir) - - LOG.debug('Creating environment %s release %s, mode ha, network-mode ' - 'neutron, net-segment-type vlan\n' - % (self.env_name, self.release_id)) - exec_cmd('fuel env create --name %s --release %s --mode ha ' - '--network-mode neutron --net-segment-type vlan' - % (self.env_name, self.release_id)) - - if not self.env_exists(self.env_name): - err("Failed to create environment %s\n" % self.env_name) - self.config_settings() - self.config_network() - self.config_nodes() - - def config_settings(self): - settings = ConfigureSettings(self.yaml_config_dir, self.env_id, - self.dea) - settings.config_settings() - - def config_network(self): - network = ConfigureNetwork(self.yaml_config_dir, self.env_id, self.dea) - network.config_network() - - def config_nodes(self): - nodes = ConfigureNodes(self.yaml_config_dir, self.env_id, - self.node_id_roles_dict, self.dea) - nodes.config_nodes() - - - diff --git a/fuel/deploy/cloud_deploy/cloud/configure_network.py b/fuel/deploy/cloud_deploy/cloud/configure_network.py deleted file mode 100644 index f4d6f87..0000000 --- a/fuel/deploy/cloud_deploy/cloud/configure_network.py +++ /dev/null @@ -1,62 +0,0 @@ -import common -import yaml -import io - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err -check_file_exists = common.check_file_exists -LOG = common.LOG - -class ConfigureNetwork(object): - - def __init__(self, yaml_config_dir, env_id, dea): - self.yaml_config_dir = yaml_config_dir - self.env_id = env_id - self.dea = dea - self.required_networks = [] - - def download_network_config(self): - LOG.debug('Download network config for environment %s\n' % self.env_id) - exec_cmd('fuel network --env %s --download --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def upload_network_config(self): - LOG.debug('Upload network config for environment %s\n' % self.env_id) - exec_cmd('fuel network --env %s --upload --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def config_network(self): - LOG.debug('Configure network\n') - self.download_network_config() - self.modify_network_config() - self.upload_network_config() - - def modify_network_config(self): - LOG.debug('Modify network config for environment %s\n' % self.env_id) - network_yaml = (self.yaml_config_dir + '/network_%s.yaml' - % self.env_id) - check_file_exists(network_yaml) - - network_config = self.dea.get_networks() - - - with io.open(network_yaml) as stream: - network = yaml.load(stream) - - net_names = self.dea.get_network_names() - net_id = {} - for net in network['networks']: - if net['name'] in net_names: - net_id[net['name']] = {'id': net['id'], - 'group_id': net['group_id']} - - for network in network_config['networks']: - network.update(net_id[network['name']]) - - with io.open(network_yaml, 'w') as stream: - yaml.dump(network_config, stream, default_flow_style=False)
\ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/configure_nodes.py b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py deleted file mode 100644 index a5e24a8..0000000 --- a/fuel/deploy/cloud_deploy/cloud/configure_nodes.py +++ /dev/null @@ -1,108 +0,0 @@ -import common -import yaml -import io -import glob - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err -check_file_exists = common.check_file_exists -LOG = common.LOG - - -class ConfigureNodes(object): - - def __init__(self, yaml_config_dir, env_id, node_id_roles_dict, dea): - self.yaml_config_dir = yaml_config_dir - self.env_id = env_id - self.node_id_roles_dict = node_id_roles_dict - self.dea = dea - - def config_nodes(self): - LOG.debug('Configure nodes\n') - for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems(): - exec_cmd('fuel node set --node-id %s --role %s --env %s' - % (node_id, ','.join(roles_shelf_blade[0]), self.env_id)) - - self.download_deployment_config() - self.modify_node_network_schemes() - self.upload_deployment_config() - - for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems(): - self.download_interface_config(node_id) - self.modify_node_interface(node_id) - self.upload_interface_config(node_id) - - def modify_node_network_schemes(self): - LOG.debug('Modify node network schemes in environment %s\n' % self.env_id) - for node_file in glob.glob('%s/deployment_%s/*.yaml' - % (self.yaml_config_dir, self.env_id)): - check_file_exists(node_file) - - if 'compute' in node_file: - node_type = 'compute' - else: - node_type = 'controller' - - network_scheme = self.dea.get_network_scheme(node_type) - - with io.open(node_file) as stream: - node = yaml.load(stream) - - node['network_scheme']['transformations'] = network_scheme - - with io.open(node_file, 'w') as stream: - yaml.dump(node, stream, default_flow_style=False) - - - def download_deployment_config(self): - LOG.debug('Download deployment config for environment %s\n' % self.env_id) - r, c = exec_cmd('fuel deployment --env %s --default --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def upload_deployment_config(self): - LOG.debug('Upload deployment config for environment %s\n' % self.env_id) - r, c = exec_cmd('fuel deployment --env %s --upload --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def download_interface_config(self, node_id): - LOG.debug('Download interface config for node %s\n' % node_id) - r, c = exec_cmd('fuel node --env %s --node %s --network --download ' - '--dir %s' % (self.env_id, node_id, - self.yaml_config_dir)) - - def upload_interface_config(self, node_id): - LOG.debug('Upload interface config for node %s\n' % node_id) - r, c = exec_cmd('fuel node --env %s --node %s --network --upload ' - '--dir %s' % (self.env_id, node_id, - self.yaml_config_dir)) - - def modify_node_interface(self, node_id): - LOG.debug('Modify interface config for node %s\n' % node_id) - interface_yaml = (self.yaml_config_dir + '/node_%s/interfaces.yaml' - % node_id) - - with io.open(interface_yaml) as stream: - interfaces = yaml.load(stream) - - net_name_id = {} - for interface in interfaces: - for network in interface['assigned_networks']: - net_name_id[network['name']] = network['id'] - - interface_config = self.dea.get_interfaces() - - for interface in interfaces: - interface['assigned_networks'] = [] - for net_name in interface_config[interface['name']]: - net = {} - net['id'] = net_name_id[net_name] - net['name'] = net_name - 
interface['assigned_networks'].append(net) - - with io.open(interface_yaml, 'w') as stream: - yaml.dump(interfaces, stream, default_flow_style=False)
\ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/configure_settings.py b/fuel/deploy/cloud_deploy/cloud/configure_settings.py deleted file mode 100644 index 3a3e4d5..0000000 --- a/fuel/deploy/cloud_deploy/cloud/configure_settings.py +++ /dev/null @@ -1,47 +0,0 @@ -import common -import yaml -import io - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err -check_file_exists = common.check_file_exists -LOG = common.LOG - -class ConfigureSettings(object): - - def __init__(self, yaml_config_dir, env_id, dea): - self.yaml_config_dir = yaml_config_dir - self.env_id = env_id - self.dea = dea - - def download_settings(self): - LOG.debug('Download settings for environment %s\n' % self.env_id) - r, c = exec_cmd('fuel settings --env %s --download --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def upload_settings(self): - LOG.debug('Upload settings for environment %s\n' % self.env_id) - r, c = exec_cmd('fuel settings --env %s --upload --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def config_settings(self): - LOG.debug('Configure settings\n') - self.download_settings() - self.modify_settings() - self.upload_settings() - - def modify_settings(self): - LOG.debug('Modify settings for environment %s\n' % self.env_id) - settings_yaml = (self.yaml_config_dir + '/settings_%s.yaml' - % self.env_id) - check_file_exists(settings_yaml) - - settings = self.dea.get_settings() - - with io.open(settings_yaml, 'w') as stream: - yaml.dump(settings, stream, default_flow_style=False) diff --git a/fuel/deploy/cloud_deploy/cloud/dea.py b/fuel/deploy/cloud_deploy/cloud/dea.py deleted file mode 100644 index 295636a..0000000 --- a/fuel/deploy/cloud_deploy/cloud/dea.py +++ /dev/null @@ -1,86 +0,0 @@ -import yaml -import io - -class DeploymentEnvironmentAdapter(object): - def __init__(self): - self.dea_struct = None - self.blade_ids_per_shelves = {} - self.blades_per_shelves = {} - self.shelf_ids = [] - self.info_per_shelves = {} - self.network_names = [] - - def parse_yaml(self, yaml_path): - with io.open(yaml_path) as yaml_file: - self.dea_struct = yaml.load(yaml_file) - self.collect_shelf_and_blade_info() - self.collect_shelf_info() - self.collect_network_names() - - def get_no_of_blades(self): - no_of_blades = 0 - for shelf in self.dea_struct['shelf']: - no_of_blades += len(shelf['blade']) - return no_of_blades - - def collect_shelf_info(self): - self.info_per_shelves = {} - for shelf in self.dea_struct['shelf']: - self.info_per_shelves[shelf['id']] = shelf - - def get_shelf_info(self, shelf): - return (self.info_per_shelves[shelf]['type'], - self.info_per_shelves[shelf]['mgmt_ip'], - self.info_per_shelves[shelf]['username'], - self.info_per_shelves[shelf]['password']) - - def get_environment_name(self): - return self.dea_struct['name'] - - def get_shelf_ids(self): - return self.shelf_ids - - def get_blade_ids_per_shelf(self, shelf_id): - return self.blade_ids_per_shelves[shelf_id] - - def get_blade_ids_per_shelves(self): - return self.blade_ids_per_shelves - - def collect_shelf_and_blade_info(self): - self.blade_ids_per_shelves = {} - self.blades_per_shelves = {} - self.shelf_ids = [] - for shelf in self.dea_struct['shelf']: - self.shelf_ids.append(shelf['id']) - blade_ids = self.blade_ids_per_shelves[shelf['id']] = [] - blades = self.blades_per_shelves[shelf['id']] = {} - for blade in shelf['blade']: - blade_ids.append(blade['id']) - blades[blade['id']] = blade - - def has_role(self, role, shelf, blade): - 
blade = self.blades_per_shelves[shelf][blade] - if role == 'compute': - return True if 'roles' not in blade else False - return (True if 'roles' in blade and role in blade['roles'] - else False) - - def collect_network_names(self): - self.network_names = [] - for network in self.dea_struct['networks']['networks']: - self.network_names.append(network['name']) - - def get_networks(self): - return self.dea_struct['networks'] - - def get_network_names(self): - return self.network_names - - def get_settings(self): - return self.dea_struct['settings'] - - def get_network_scheme(self, node_type): - return self.dea_struct[node_type] - - def get_interfaces(self): - return self.dea_struct['interfaces']
\ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/deploy.py b/fuel/deploy/cloud_deploy/cloud/deploy.py deleted file mode 100644 index ea33f8b..0000000 --- a/fuel/deploy/cloud_deploy/cloud/deploy.py +++ /dev/null @@ -1,208 +0,0 @@ -import time -import yaml -import io -import os - -import common -from dea import DeploymentEnvironmentAdapter -from configure_environment import ConfigureEnvironment -from deployment import Deployment - -SUPPORTED_RELEASE = 'Juno on CentOS 6.5' - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err -check_file_exists = common.check_file_exists -LOG = common.LOG - -class Deploy(object): - - def __init__(self, yaml_config_dir): - self.supported_release = None - self.yaml_config_dir = yaml_config_dir - self.macs_per_shelf_dict = {} - self.node_ids_dict = {} - self.node_id_roles_dict = {} - self.env_id = None - self.shelf_blades_dict = {} - - def cleanup_fuel_environments(self, env_list): - WAIT_LOOP = 60 - SLEEP_TIME = 10 - for env in env_list: - LOG.debug('Deleting environment %s\n' % env[E['id']]) - exec_cmd('fuel env --env %s --delete' % env[E['id']]) - all_env_erased = False - for i in range(WAIT_LOOP): - env_list = parse(exec_cmd('fuel env list')) - if env_list[0][0]: - time.sleep(SLEEP_TIME) - else: - all_env_erased = True - break - if not all_env_erased: - err('Could not erase these environments %s' - % [(env[E['id']], env[E['status']]) for env in env_list]) - - def cleanup_fuel_nodes(self, node_list): - for node in node_list: - if node[N['status']] == 'discover': - LOG.debug('Deleting node %s\n' % node[N['id']]) - exec_cmd('fuel node --node-id %s --delete-from-db' - % node[N['id']]) - exec_cmd('cobbler system remove --name node-%s' - % node[N['id']]) - - def check_previous_installation(self): - LOG.debug('Check previous installation\n') - env_list = parse(exec_cmd('fuel env list')) - if env_list[0][0]: - self.cleanup_fuel_environments(env_list) - node_list = parse(exec_cmd('fuel node list')) - if node_list[0][0]: - self.cleanup_fuel_nodes(node_list) - - def check_supported_release(self): - LOG.debug('Check supported release: %s\n' % SUPPORTED_RELEASE) - release_list = parse(exec_cmd('fuel release -l')) - for release in release_list: - if release[R['name']] == SUPPORTED_RELEASE: - self.supported_release = release - break - if not self.supported_release: - err('This Fuel does not contain the following ' - 'release: %s\n' % SUPPORTED_RELEASE) - - def check_prerequisites(self): - LOG.debug('Check prerequisites\n') - self.check_supported_release() - self.check_previous_installation() - - def find_mac_in_dict(self, mac): - for shelf, blade_dict in self.macs_per_shelf_dict.iteritems(): - for blade, mac_list in blade_dict.iteritems(): - if mac in mac_list: - return shelf, blade - - def all_blades_discovered(self): - for shelf, blade_dict in self.node_ids_dict.iteritems(): - for blade, node_id in blade_dict.iteritems(): - if not node_id: - return False - return True - - def not_discovered_blades_summary(self): - summary = '' - for shelf, blade_dict in self.node_ids_dict.iteritems(): - for blade, node_id in blade_dict.iteritems(): - if not node_id: - summary += '[shelf %s, blade %s]\n' % (shelf, blade) - return summary - - def collect_blade_ids_per_shelves(self, dea): - self.shelf_blades_dict = dea.get_blade_ids_per_shelves() - - def node_discovery(self, node_list, discovered_macs): - for node in node_list: - if (node[N['status']] == 'discover' and - node[N['online']] == 
'True' and - node[N['mac']] not in discovered_macs): - discovered_macs.append(node[N['mac']]) - shelf_blade = self.find_mac_in_dict(node[N['mac']]) - if shelf_blade: - self.node_ids_dict[shelf_blade[0]][shelf_blade[1]] = \ - node[N['id']] - - def discovery_waiting_loop(self, discovered_macs): - WAIT_LOOP = 180 - SLEEP_TIME = 10 - all_discovered = False - for i in range(WAIT_LOOP): - node_list = parse(exec_cmd('fuel node list')) - if node_list[0][0]: - self.node_discovery(node_list, discovered_macs) - if self.all_blades_discovered(): - all_discovered = True - break - else: - time.sleep(SLEEP_TIME) - return all_discovered - - def wait_for_discovered_blades(self): - LOG.debug('Wait for discovered blades\n') - discovered_macs = [] - for shelf, blade_list in self.shelf_blades_dict.iteritems(): - self.node_ids_dict[shelf] = {} - for blade in blade_list: - self.node_ids_dict[shelf][blade] = None - all_discovered = self.discovery_waiting_loop(discovered_macs) - if not all_discovered: - err('Not all blades have been discovered: %s\n' - % self.not_discovered_blades_summary()) - - def get_mac_addresses(self, macs_yaml): - with io.open(macs_yaml, 'r') as stream: - self.macs_per_shelf_dict = yaml.load(stream) - - def assign_roles_to_cluster_node_ids(self, dea): - self.node_id_roles_dict = {} - for shelf, blades_dict in self.node_ids_dict.iteritems(): - for blade, node_id in blades_dict.iteritems(): - role_list = [] - if dea.has_role('controller', shelf, blade): - role_list.extend(['controller', 'mongo']) - if dea.has_role('cinder', shelf, blade): - role_list.extend(['cinder']) - elif dea.has_role('compute', shelf, blade): - role_list.extend(['compute']) - self.node_id_roles_dict[node_id] = (role_list, shelf, blade) - - def configure_environment(self, dea): - config_env = ConfigureEnvironment(dea, self.yaml_config_dir, - self.supported_release[R['id']], - self.node_id_roles_dict) - config_env.configure_environment() - self.env_id = config_env.env_id - - def deploy(self, dea): - dep = Deployment(dea, self.yaml_config_dir, self.env_id, - self.node_id_roles_dict) - dep.deploy() - - -def main(): - - base_dir = os.path.dirname(os.path.realpath(__file__)) - dea_yaml = base_dir + '/dea.yaml' - check_file_exists(dea_yaml) - macs_yaml = base_dir + '/macs.yaml' - check_file_exists(macs_yaml) - - yaml_config_dir = '/var/lib/opnfv/pre_deploy' - - deploy = Deploy(yaml_config_dir) - dea = DeploymentEnvironmentAdapter() - dea.parse_yaml(dea_yaml) - - deploy.get_mac_addresses(macs_yaml) - - deploy.collect_blade_ids_per_shelves(dea) - - deploy.check_prerequisites() - - deploy.wait_for_discovered_blades() - - deploy.assign_roles_to_cluster_node_ids(dea) - - deploy.configure_environment(dea) - - deploy.deploy(dea) - - -if __name__ == '__main__': - main()
\ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/deployment.py b/fuel/deploy/cloud_deploy/cloud/deployment.py deleted file mode 100644 index 831059b..0000000 --- a/fuel/deploy/cloud_deploy/cloud/deployment.py +++ /dev/null @@ -1,100 +0,0 @@ -import common -import os -import shutil -import glob -import yaml -import io -import time - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -run_proc = common.run_proc -parse = common.parse -err = common.err -LOG = common.LOG - - -class Deployment(object): - - def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict): - self.dea = dea - self.env_name = dea.get_environment_name() - self.yaml_config_dir = yaml_config_dir - self.env_id = env_id - self.node_id_roles_dict = node_id_roles_dict - self.node_id_list = [] - for node_id in self.node_id_roles_dict.iterkeys(): - self.node_id_list.append(node_id) - self.node_id_list.sort() - - def download_deployment_info(self): - LOG.debug('Download deployment info for environment %s\n' % self.env_id) - deployment_dir = self.yaml_config_dir + '/deployment_%s' % self.env_id - if os.path.exists(deployment_dir): - shutil.rmtree(deployment_dir) - r, c = exec_cmd('fuel --env %s deployment --default --dir %s' - % (self.env_id, self.yaml_config_dir)) - if c > 0: - err('Error: Could not download deployment info for env %s,' - ' reason: %s\n' % (self.env_id, r)) - - def upload_deployment_info(self): - LOG.debug('Upload deployment info for environment %s\n' % self.env_id) - r, c = exec_cmd('fuel --env %s deployment --upload --dir %s' - % (self.env_id, self.yaml_config_dir)) - if c > 0: - err('Error: Could not upload deployment info for env %s,' - ' reason: %s\n' % (self.env_id, r)) - - def pre_deploy(self): - LOG.debug('Running pre-deploy on environment %s\n' % self.env_name) - self.download_deployment_info() - opnfv = {'opnfv': {}} - - for node_file in glob.glob('%s/deployment_%s/*.yaml' - % (self.yaml_config_dir, self.env_id)): - with io.open(node_file) as stream: - node = yaml.load(stream) - - if 'opnfv' not in node: - node.update(opnfv) - - with io.open(node_file, 'w') as stream: - yaml.dump(node, stream, default_flow_style=False) - self.upload_deployment_info() - - - def deploy(self): - WAIT_LOOP = 180 - SLEEP_TIME = 60 - - self.pre_deploy() - - log_file = 'cloud.log' - - LOG.debug('Starting deployment of environment %s\n' % self.env_name) - run_proc('fuel --env %s deploy-changes | strings | tee %s' - % (self.env_id, log_file)) - - ready = False - for i in range(WAIT_LOOP): - env = parse(exec_cmd('fuel env --env %s' % self.env_id)) - LOG.debug('Environment status: %s\n' % env[0][E['status']]) - r, _ = exec_cmd('tail -2 %s | head -1' % log_file) - if r: - LOG.debug('%s\n' % r) - if env[0][E['status']] == 'operational': - ready = True - break - else: - time.sleep(SLEEP_TIME) - exec_cmd('rm %s' % log_file) - - if ready: - LOG.debug('Environment %s successfully deployed\n' % self.env_name) - else: - err('Deployment failed, environment %s is not operational\n' - % self.env_name) diff --git a/fuel/deploy/cloud_deploy/cloud_deploy.py b/fuel/deploy/cloud_deploy/cloud_deploy.py deleted file mode 100644 index 4197519..0000000 --- a/fuel/deploy/cloud_deploy/cloud_deploy.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -import io -import yaml - -from cloud import common -from cloud.dea import DeploymentEnvironmentAdapter -from hardware_adapters.dha import DeploymentHardwareAdapter -from ssh_client import SSHClient - -exec_cmd = common.exec_cmd -err = common.err 
-check_file_exists = common.check_file_exists -LOG = common.LOG - -class CloudDeploy(object): - - def __init__(self, fuel_ip, fuel_username, fuel_password): - self.fuel_ip = fuel_ip - self.fuel_username = fuel_username - self.fuel_password = fuel_password - self.shelf_blades_dict = {} - self.macs_per_shelf_dict = {} - - def copy_to_fuel_master(self, dir_path=None, file_path=None, target='~'): - if dir_path: - path = '-r ' + dir_path - elif file_path: - path = file_path - LOG.debug('Copying %s to Fuel Master %s' % (path, target)) - if path: - exec_cmd('sshpass -p %s scp -o UserKnownHostsFile=/dev/null' - ' -o StrictHostKeyChecking=no -o ConnectTimeout=15' - ' %s %s@%s:%s' - % (self.fuel_password, path, self.fuel_username, - self.fuel_ip, target)) - - def run_cloud_deploy(self, deploy_dir, deploy_app): - LOG.debug('START CLOUD DEPLOYMENT') - ssh = SSHClient(self.fuel_ip, self.fuel_username, self.fuel_password) - ssh.open() - ssh.run('python %s/%s' % (deploy_dir, deploy_app)) - ssh.close() - - def power_off_blades(self, dea): - for shelf, blade_list in self.shelf_blades_dict.iteritems(): - type, mgmt_ip, username, password = dea.get_shelf_info(shelf) - dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password) - dha.power_off_blades(shelf, blade_list) - - def power_on_blades(self, dea): - for shelf, blade_list in self.shelf_blades_dict.iteritems(): - type, mgmt_ip, username, password = dea.get_shelf_info(shelf) - dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password) - dha.power_on_blades(shelf, blade_list) - - def set_boot_order(self, dea): - for shelf, blade_list in self.shelf_blades_dict.iteritems(): - type, mgmt_ip, username, password = dea.get_shelf_info(shelf) - dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password) - dha.set_boot_order_blades(shelf, blade_list) - - def get_mac_addresses(self, dea, macs_yaml): - self.macs_per_shelf_dict = {} - for shelf, blade_list in self.shelf_blades_dict.iteritems(): - type, mgmt_ip, username, password = dea.get_shelf_info(shelf) - dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password) - self.macs_per_shelf_dict[shelf] = dha.get_blades_mac_addresses( - shelf, blade_list) - - with io.open(macs_yaml, 'w') as stream: - yaml.dump(self.macs_per_shelf_dict, stream, - default_flow_style=False) - - def collect_blade_ids_per_shelves(self, dea): - self.shelf_blades_dict = dea.get_blade_ids_per_shelves() - - - -def main(): - - fuel_ip = '10.20.0.2' - fuel_username = 'root' - fuel_password = 'r00tme' - deploy_dir = '~/cloud' - - cloud = CloudDeploy(fuel_ip, fuel_username, fuel_password) - - base_dir = os.path.dirname(os.path.realpath(__file__)) - deployment_dir = base_dir + '/cloud' - macs_yaml = base_dir + '/macs.yaml' - dea_yaml = base_dir + '/dea.yaml' - check_file_exists(dea_yaml) - - cloud.copy_to_fuel_master(dir_path=deployment_dir) - cloud.copy_to_fuel_master(file_path=dea_yaml, target=deploy_dir) - - dea = DeploymentEnvironmentAdapter() - dea.parse_yaml(dea_yaml) - - cloud.collect_blade_ids_per_shelves(dea) - - cloud.power_off_blades(dea) - - cloud.set_boot_order(dea) - - cloud.power_on_blades(dea) - - cloud.get_mac_addresses(dea, macs_yaml) - check_file_exists(dea_yaml) - - cloud.copy_to_fuel_master(file_path=macs_yaml, target=deploy_dir) - - cloud.run_cloud_deploy(deploy_dir, 'deploy.py') - - -if __name__ == '__main__': - main()
\ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py deleted file mode 100644 index c274feb..0000000 --- a/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/dha.py b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py deleted file mode 100644 index 2764aeb..0000000 --- a/fuel/deploy/cloud_deploy/hardware_adapters/dha.py +++ /dev/null @@ -1,61 +0,0 @@ -from hp.hp_adapter import HpAdapter -from libvirt.libvirt_adapter import LibvirtAdapter - -class DeploymentHardwareAdapter(object): - def __new__(cls, server_type, *args): - if cls is DeploymentHardwareAdapter: - if server_type == 'esxi': return EsxiAdapter(*args) - if server_type == 'hp': return HpAdapter(*args) - if server_type == 'dell': return DellAdapter(*args) - if server_type == 'libvirt': return LibvirtAdapter(*args) - return super(DeploymentHardwareAdapter, cls).__new__(cls) - - -class HardwareAdapter(object): - - def power_off_blades(self, shelf, blade_list): - raise NotImplementedError - - def power_off_blade(self, shelf, blade): - raise NotImplementedError - - def power_on_blades(self, shelf, blade_list): - raise NotImplementedError - - def power_on_blade(self, shelf, blade): - raise NotImplementedError - - def power_cycle_blade(self): - raise NotImplementedError - - def set_boot_order_blades(self, shelf, blade_list): - raise NotImplementedError - - def set_boot_order_blade(self, shelf, blade): - raise NotImplementedError - - def reset_to_factory_defaults(self): - raise NotImplementedError - - def configure_networking(self): - raise NotImplementedError - - def get_blade_mac_addresses(self, shelf, blade): - raise NotImplementedError - - def get_hardware_info(self, shelf, blade): - raise NotImplementedError - - -class EsxiAdapter(HardwareAdapter): - - def __init__(self): - self.environment = {1: {1: {'mac': ['00:50:56:8c:05:85']}, - 2: {'mac': ['00:50:56:8c:21:92']}}} - - def get_blade_mac_addresses(self, shelf, blade): - return self.environment[shelf][blade]['mac'] - - -class DellAdapter(HardwareAdapter): - pass diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py deleted file mode 100644 index c274feb..0000000 --- a/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py deleted file mode 100644 index 930d234..0000000 --- a/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py +++ /dev/null @@ -1,288 +0,0 @@ -import re -import time -from netaddr import EUI, mac_unix -from cloud import common -from ssh_client import SSHClient - -LOG = common.LOG -err = common.err - -S = {'bay': 0, 'ilo_name': 1, 'ilo_ip': 2, 'status': 3, 'power': 4, - 'uid_partner': 5} - -class HpAdapter(object): - - def __init__(self, mgmt_ip, username, password): - self.mgmt_ip = mgmt_ip - self.username = username - self.password = password - - class mac_dhcp(mac_unix): - word_fmt = '%.2x' - - def next_ip(self): - digit_list = self.mgmt_ip.split('.') - digit_list[3] = str(int(digit_list[3]) + 1) - self.mgmt_ip = '.'.join(digit_list) - - def connect(self): - verified_ips = [self.mgmt_ip] - ssh = SSHClient(self.mgmt_ip, self.username, self.password) - try: - ssh.open() - except Exception: - 
self.next_ip() - verified_ips.append(self.mgmt_ip) - ssh = SSHClient(self.mgmt_ip, self.username, self.password) - try: - ssh.open() - except Exception as e: - err('Could not connect to HP Onboard Administrator through ' - 'these IPs: %s, reason: %s' % (verified_ips, e)) - - lines = self.clean_lines(ssh.execute('show oa status')) - for line in lines: - if 'Role: Standby' in line: - ssh.close() - if self.mgmt_ip != verified_ips[0]: - err('Can only talk to OA %s which is the standby OA\n' - % self.mgmt_ip) - else: - LOG.debug('%s is the standby OA, trying next OA\n' - % self.mgmt_ip) - self.next_ip() - verified_ips.append(self.mgmt_ip) - ssh = SSHClient(self.mgmt_ip, self.username, self.password) - try: - ssh.open() - except Exception as e: - err('Could not connect to HP Onboard Administrator' - ' through these IPs: %s, reason: %s' - % (verified_ips, e)) - - elif 'Role: Active' in line: - return ssh - err('Could not reach Active OA through these IPs %s' % verified_ips) - - def get_blades_mac_addresses(self, shelf, blade_list): - macs_per_blade_dict = {} - LOG.debug('Getting MAC addresses for shelf %s, blades %s' - % (shelf, blade_list)) - ssh = self.connect() - for blade in blade_list: - lines = self.clean_lines( - ssh.execute('show server info %s' % blade)) - left, right = self.find_mac(lines, shelf, blade) - - left = EUI(left, dialect=self.mac_dhcp) - right = EUI(right, dialect=self.mac_dhcp) - macs_per_blade_dict[blade] = [str(left), str(right)] - ssh.close() - return macs_per_blade_dict - - def find_mac(self, printout, shelf, blade): - left = False - right = False - for line in printout: - if ('No Server Blade Installed' in line or - 'Invalid Arguments' in line): - err('Blade %d in shelf %d does not exist' % (blade, shelf)) - - seobj = re.search(r'LOM1:1-a\s+([0-9A-F:]+)', line, re.I) - if seobj: - left = seobj.group(1) - else: - seobj = re.search(r'LOM1:2-a\s+([0-9A-F:]+)', line, re.I) - if seobj: - right = seobj.group(1) - if left and right: - return left, right - - def get_hardware_info(self, shelf, blade=None): - ssh = self.connect() - if ssh and not blade: - ssh.close() - return 'HP' - - lines = self.clean_lines(ssh.execute('show server info %s' % blade)) - ssh.close() - - match = r'Product Name:\s+(.+)\Z' - if not re.search(match, str(lines[:])): - LOG.debug('Blade %s in shelf %s does not exist\n' % (blade, shelf)) - return False - - for line in lines: - seobj = re.search(match, line) - if seobj: - return 'HP %s' % seobj.group(1) - return False - - def power_off_blades(self, shelf, blade_list): - return self.set_state(shelf, 'locked', blade_list) - - def power_on_blades(self, shelf, blade_list): - return self.set_state(shelf, 'unlocked', blade_list) - - def set_boot_order_blades(self, shelf, blade_list): - return self.set_boot_order(shelf, blade_list=blade_list) - - def parse(self, lines): - parsed_list = [] - for l in lines[5:-2]: - parsed = [] - cluttered = [e.strip() for e in l.split(' ')] - for p in cluttered: - if p: - parsed.append(p) - parsed_list.append(parsed) - return parsed_list - - def set_state(self, shelf, state, blade_list): - if state not in ['locked', 'unlocked']: - LOG.debug('Incorrect state: %s' % state) - return None - - LOG.debug('Setting state %s for blades %s in shelf %s' - % (state, blade_list, shelf)) - - blade_list = sorted(blade_list) - ssh = self.connect() - - LOG.debug('Check if blades are present') - server_list = self.parse( - self.clean_lines(ssh.execute('show server list'))) - - for blade in blade_list: - if server_list[S['status']] == 'Absent': - 
LOG.debug('Blade %s in shelf %s is missing. ' - 'Set state %s not performed\n' - % (blade, shelf, state)) - blade_list.remove(blade) - - bladelist = ','.join(blade_list) - - # Use leading upper case on On/Off so it can be reused in match - force = '' - if state == 'locked': - powerstate = 'Off' - force = 'force' - else: - powerstate = 'On' - cmd = 'power%s server %s' % (powerstate, bladelist) - if force: - cmd += ' %s' % force - - LOG.debug(cmd) - ssh.execute(cmd) - - # Check that all blades reach the state which can take some time, - # so re-try a couple of times - LOG.debug('Check if state %s successfully set' % state) - - WAIT_LOOP = 2 - SLEEP_TIME = 3 - - set_blades = [] - - for i in range(WAIT_LOOP): - server_list = self.parse( - self.clean_lines(ssh.execute('show server list'))) - - for blade in blade_list: - for server in server_list: - if (server[S['bay']] == blade and - server[S['power']] == powerstate): - set_blades.append(blade) - break - - all_set = set(blade_list) == set(set_blades) - if all_set: - break - else: - time.sleep(SLEEP_TIME) - - ssh.close() - - if all_set: - LOG.debug('State %s successfully set on blades %s in shelf %d' - % (state, set_blades, shelf)) - return True - else: - LOG.debug('Could not set state %s on blades %s in shelf %s\n' - % (state, set(blade_list) - set(set_blades), shelf)) - return False - - - def clean_lines(self, printout): - lines = [] - for p in [l.strip() for l in printout.splitlines()]: - if p: - lines.append(p) - return lines - - - def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None): - - boot_dict = {'Hard Drive': 'hdd', - 'PXE NIC': 'pxe', - 'CD-ROM': 'cd', - 'USB': 'usb', - 'Diskette Driver': 'disk'} - - boot_options = [b for b in boot_dict.itervalues()] - diff = list(set(boot_dev_list) - set(boot_options)) - if diff: - err('The following boot options %s are not valid' % diff) - - blade_list = sorted(blade_list) - LOG.debug('Setting boot order %s for blades %s in shelf %s' - % (boot_dev_list, blade_list, shelf)) - - ssh = self.connect() - - LOG.debug('Check if blades are present') - server_list = self.parse( - self.clean_lines(ssh.execute('show server list'))) - - for blade in blade_list: - if server_list[S['status']] == 'Absent': - LOG.debug('Blade %s in shelf %s is missing. 
' - 'Change boot order %s not performed.\n' - % (blade, shelf, boot_dev_list)) - blade_list.remove(blade) - - bladelist = ','.join(blade_list) - - for boot_dev in reversed(boot_dev_list): - ssh.execute('set server boot first %s %s' % (boot_dev, bladelist)) - - LOG.debug('Check if boot order is successfully set') - - success_list = [] - boot_keys = [b for b in boot_dict.iterkeys()] - for blade in blade_list: - lines = self.clean_lines(ssh.execute('show server boot %s' - % blade)) - boot_order = lines[lines.index('IPL Devices (Boot Order):')+1:] - boot_list = [] - success = False - for b in boot_order: - for k in boot_keys: - if k in b: - boot_list.append(boot_dict[k]) - break - if boot_list == boot_dev_list: - success = True - break - - success_list.append(success) - if success: - LOG.debug('Boot order %s successfully set on blade %s in ' - 'shelf %s\n' % (boot_dev_list, blade, shelf)) - else: - LOG.debug('Failed to set boot order %s on blade %s in ' - 'shelf %s\n' % (boot_dev_list, blade, shelf)) - - ssh.close() - return all(success_list) diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py deleted file mode 100644 index c274feb..0000000 --- a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py deleted file mode 100644 index d332e59..0000000 --- a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py +++ /dev/null @@ -1,153 +0,0 @@ -from lxml import etree -from cloud import common -from ssh_client import SSHClient - -exec_cmd = common.exec_cmd -err = common.err -LOG = common.LOG - - -class LibvirtAdapter(object): - - def __init__(self, mgmt_ip, username, password): - self.mgmt_ip = mgmt_ip - self.username = username - self.password = password - self.parser = etree.XMLParser(remove_blank_text=True) - - def power_off_blades(self, shelf, blade_list): - ssh = SSHClient(self.mgmt_ip, self.username, self.password) - ssh.open() - for blade in blade_list: - LOG.debug('Power off blade %s in shelf %s' % (blade, shelf)) - vm_name = 's%s_b%s' % (shelf, blade) - resp = ssh.execute('virsh destroy %s' % vm_name) - LOG.debug('response: %s' % resp) - ssh.close() - - def power_on_blades(self, shelf, blade_list): - ssh = SSHClient(self.mgmt_ip, self.username, self.password) - ssh.open() - for blade in blade_list: - LOG.debug('Power on blade %s in shelf %s' % (blade, shelf)) - vm_name = 's%s_b%s' % (shelf, blade) - resp = ssh.execute('virsh start %s' % vm_name) - LOG.debug('response: %s' % resp) - ssh.close() - - def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None): - if not boot_dev_list: - boot_dev_list = ['network', 'hd'] - ssh = SSHClient(self.mgmt_ip, self.username, self.password) - ssh.open() - temp_dir= ssh.execute('mktemp -d').strip() - for blade in blade_list: - LOG.debug('Set boot order %s on blade %s in shelf %s' - % (boot_dev_list, blade, shelf)) - vm_name = 's%s_b%s' % (shelf, blade) - resp = ssh.execute('virsh dumpxml %s' % vm_name) - xml_dump = etree.fromstring(resp, self.parser) - os = xml_dump.xpath('/domain/os') - for o in os: - for bootelem in ['boot', 'bootmenu']: - boot = o.xpath(bootelem) - for b in boot: - b.getparent().remove(b) - for dev in boot_dev_list: - b = etree.Element('boot') - b.set('dev', dev) - o.append(b) - bmenu = etree.Element('bootmenu') 
- bmenu.set('enable', 'no') - o.append(bmenu) - tree = etree.ElementTree(xml_dump) - xml_file = temp_dir + '/%s.xml' % vm_name - with open(xml_file, 'w') as f: - tree.write(f, pretty_print=True, xml_declaration=True) - ssh.execute('virsh define %s' % xml_file) - ssh.execute('rm -fr %s' % temp_dir) - ssh.close() - - def get_blades_mac_addresses(self, shelf, blade_list): - LOG.debug('Get the MAC addresses of blades %s in shelf %s' - % (blade_list, shelf)) - macs_per_blade_dict = {} - ssh = SSHClient(self.mgmt_ip, self.username, self.password) - ssh.open() - for blade in blade_list: - vm_name = 's%s_b%s' % (shelf, blade) - mac_list = macs_per_blade_dict[blade] = [] - resp = ssh.execute('virsh dumpxml %s' % vm_name) - xml_dump = etree.fromstring(resp) - interfaces = xml_dump.xpath('/domain/devices/interface') - for interface in interfaces: - macs = interface.xpath('mac') - for mac in macs: - mac_list.append(mac.get('address')) - ssh.close() - return macs_per_blade_dict - - def load_image_file(self, shelf=None, blade=None, vm=None, - image_path=None): - if shelf and blade: - vm_name = 's%s_b%s' % (shelf, blade) - else: - vm_name = vm - - LOG.debug('Load media file %s into %s ' - % (image_path, 'vm %s' % vm if vm else 'blade %s in shelf %s' - % (shelf, blade))) - - ssh = SSHClient(self.mgmt_ip, self.username, self.password) - ssh.open() - temp_dir= ssh.execute('mktemp -d').strip() - resp = ssh.execute('virsh dumpxml %s' % vm_name) - xml_dump = etree.fromstring(resp) - - disks = xml_dump.xpath('/domain/devices/disk') - for disk in disks: - if disk.get('device') == 'cdrom': - disk.set('type', 'file') - sources = disk.xpath('source') - for source in sources: - disk.remove(source) - source = etree.SubElement(disk, 'source') - source.set('file', image_path) - tree = etree.ElementTree(xml_dump) - xml_file = temp_dir + '/%s.xml' % vm_name - with open(xml_file, 'w') as f: - tree.write(f, pretty_print=True, xml_declaration=True) - ssh.execute('virsh define %s' % xml_file) - ssh.execute('rm -fr %s' % temp_dir) - ssh.close() - - def eject_image_file(self, shelf=None, blade=None, vm=None): - if shelf and blade: - vm_name = 's%s_b%s' % (shelf, blade) - else: - vm_name = vm - - LOG.debug('Eject media file from %s ' - % 'vm %s' % vm if vm else 'blade %s in shelf %s' - % (shelf, blade)) - - ssh = SSHClient(self.mgmt_ip, self.username, self.password) - ssh.open() - temp_dir= ssh.execute('mktemp -d').strip() - resp = ssh.execute('virsh dumpxml %s' % vm_name) - xml_dump = etree.fromstring(resp) - - disks = xml_dump.xpath('/domain/devices/disk') - for disk in disks: - if disk.get('device') == 'cdrom': - disk.set('type', 'block') - sources = disk.xpath('source') - for source in sources: - disk.remove(source) - tree = etree.ElementTree(xml_dump) - xml_file = temp_dir + '/%s.xml' % vm_name - with open(xml_file, 'w') as f: - tree.write(f, pretty_print=True, xml_declaration=True) - ssh.execute('virsh define %s' % xml_file) - ssh.execute('rm -fr %s' % temp_dir) - ssh.close() diff --git a/fuel/deploy/cloud_deploy/ssh_client.py b/fuel/deploy/cloud_deploy/ssh_client.py deleted file mode 100644 index b9aad6c..0000000 --- a/fuel/deploy/cloud_deploy/ssh_client.py +++ /dev/null @@ -1,56 +0,0 @@ -import paramiko -from cloud import common - -TIMEOUT = 600 -LOG = common.LOG - -class SSHClient(object): - - def __init__(self, host, username, password): - self.host = host - self.username = username - self.password = password - self.client = None - - def open(self, timeout=TIMEOUT): - self.client = paramiko.SSHClient() - 
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - self.client.connect(self.host, username=self.username, - password=self.password, timeout=timeout) - - def close(self): - if self.client is not None: - self.client.close() - self.client = None - - def execute(self, command, sudo=False, timeout=TIMEOUT): - if sudo and self.username != 'root': - command = "sudo -S -p '' %s" % command - stdin, stdout, stderr = self.client.exec_command(command, - timeout=timeout) - if sudo: - stdin.write(self.password + '\n') - stdin.flush() - return ''.join(''.join(stderr.readlines()) + - ''.join(stdout.readlines())) - - def run(self, command): - transport = self.client.get_transport() - transport.set_keepalive(1) - chan = transport.open_session() - chan.exec_command(command) - - while not chan.exit_status_ready(): - if chan.recv_ready(): - data = chan.recv(1024) - while data: - print data - data = chan.recv(1024) - - if chan.recv_stderr_ready(): - error_buff = chan.recv_stderr(1024) - while error_buff: - print error_buff - error_buff = chan.recv_stderr(1024) - exit_status = chan.recv_exit_status() - LOG.debug('Exit status %s' % exit_status)
\ No newline at end of file