author | wu.zhihui <wu.zhihui1@zte.com.cn> | 2016-11-09 09:38:08 +0800
committer | wu.zhihui <wu.zhihui1@zte.com.cn> | 2016-11-09 09:38:08 +0800
commit | eed3a9fc0bf490bef2e1c7cc597db6063c11bcac (patch)
tree | c4582b43b6e3c22b4ca6d9a1ce885a677a6190a9 /utils
parent | 03b2fdd33a019c4eabc8da7ba49c851bf91ffc4e (diff)
Merge func to utils
Local test is ok.
JIRA: QTIP-131
Change-Id: I7009337903c7ded90dda47a05d6c1c95aa96815d
Signed-off-by: wu.zhihui <wu.zhihui1@zte.com.cn>
Diffstat (limited to 'utils')
-rw-r--r-- | utils/ansible_api.py | 65
-rw-r--r-- | utils/args_handler.py | 73
-rw-r--r-- | utils/cli.py | 76
-rw-r--r-- | utils/create_zones.py | 86
-rw-r--r-- | utils/driver.py | 92
-rw-r--r-- | utils/env_setup.py | 214
-rw-r--r-- | utils/spawn_vm.py | 206
7 files changed, 812 insertions, 0 deletions
diff --git a/utils/ansible_api.py b/utils/ansible_api.py
new file mode 100644
index 00000000..9e1d249e
--- /dev/null
+++ b/utils/ansible_api.py
@@ -0,0 +1,65 @@
+##############################################################################
+# Copyright (c) 2016 ZTE Corp and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+from collections import namedtuple
+from ansible.executor.playbook_executor import PlaybookExecutor
+from ansible.inventory import Inventory
+from ansible.parsing.dataloader import DataLoader
+from ansible.vars import VariableManager
+import logger_utils
+
+logger = logger_utils.QtipLogger('ansible_api').get
+
+
+class AnsibleApi:
+
+    def __init__(self):
+        self.variable_manager = VariableManager()
+        self.loader = DataLoader()
+        self.passwords = {}
+        self.pbex = None
+
+    def _check_path(self, file_path):
+        if not os.path.exists(file_path):
+            logger.error('The playbook %s does not exist' % file_path)
+            return False
+        else:
+            return True
+
+    def execute_playbook(self, hosts_file, playbook_path, pub_key_file, vars):
+        if not self._check_path(hosts_file):
+            return False
+
+        inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager,
+                              host_list=hosts_file)
+        Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax',
+                                         'connection', 'module_path', 'forks', 'remote_user',
+                                         'private_key_file', 'ssh_common_args', 'ssh_extra_args',
+                                         'sftp_extra_args', 'scp_extra_args', 'become',
+                                         'become_method', 'become_user', 'verbosity', 'check'])
+        options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False,
+                          connection='ssh', module_path=None, forks=100, remote_user='root',
+                          private_key_file=pub_key_file, ssh_common_args=None,
+                          ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None,
+                          become=True, become_method=None, become_user='root', verbosity=None,
+                          check=False)
+        self.variable_manager.extra_vars = vars
+
+        self.pbex = PlaybookExecutor(playbooks=[playbook_path], inventory=inventory,
+                                     variable_manager=self.variable_manager, loader=self.loader,
+                                     options=options, passwords=self.passwords)
+
+        return self.pbex.run()
+
+    def get_detail_playbook_stats(self):
+        if self.pbex:
+            stats = self.pbex._tqm._stats
+            return map(lambda x: (x, stats.summarize(x)), stats.processed.keys())
+        else:
+            return None
diff --git a/utils/args_handler.py b/utils/args_handler.py
new file mode 100644
index 00000000..879fd204
--- /dev/null
+++ b/utils/args_handler.py
@@ -0,0 +1,73 @@
+##############################################################################
+# Copyright (c) 2016 ZTE Corp and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+from operator import add
+import simplejson as json
+from env_setup import Env_setup
+from spawn_vm import SpawnVM
+from driver import Driver
+
+
+def get_files_in_suite(suite_name, case_type='all'):
+    benchmark_list = json.load(file('benchmarks/suite/{0}'.format(suite_name)))
+    return reduce(add, benchmark_list.values()) \
+        if case_type == 'all' else benchmark_list[case_type]
+
+
+def get_files_in_test_plan(lab, suite_name, case_type='all'):
+    test_case_all = os.listdir('./test_plan/{0}/{1}'.format(lab, suite_name))
+    return test_case_all if case_type == 'all' else \
+        filter(lambda x: case_type in x, test_case_all)
+
+
+def get_benchmark_path(lab, suit, benchmark):
+    return './test_plan/{0}/{1}/{2}'.format(lab, suit, benchmark)
+
+
+def check_suite(suite_name):
+    return True if os.path.isfile('benchmarks/suite/' + suite_name) else False
+
+
+def check_lab_name(lab_name):
+    return True if os.path.isdir('test_plan/' + lab_name) else False
+
+
+def check_benchmark_name(lab, file, benchmark):
+    return os.path.isfile('test_plan/' + lab + '/' + file + '/' + benchmark)
+
+
+def _get_f_name(test_case_path):
+    return test_case_path.split('/')[-1]
+
+
+def prepare_ansible_env(benchmark_test_case):
+    env_setup = Env_setup()
+    [benchmark, vm_info, benchmark_details, proxy_info] = env_setup.parse(benchmark_test_case)
+    SpawnVM(vm_info) if len(vm_info) else None
+    env_setup.call_ping_test()
+    env_setup.call_ssh_test()
+    env_setup.update_ansible()
+    return benchmark, benchmark_details, proxy_info, env_setup
+
+
+def run_benchmark(installer_type, pwd, benchmark, benchmark_details,
+                  proxy_info, env_setup, benchmark_test_case):
+    driver = Driver()
+    result = driver.drive_bench(installer_type, pwd, benchmark,
+                                env_setup.roles_dict.items(),
+                                _get_f_name(benchmark_test_case),
+                                benchmark_details, env_setup.ip_pw_dict.items(), proxy_info)
+    env_setup.cleanup_authorized_keys()
+    return result
+
+
+def prepare_and_run_benchmark(installer_type, pwd, benchmark_test_case):
+    benchmark, benchmark_details, proxy_info, env_setup = prepare_ansible_env(benchmark_test_case)
+    return run_benchmark(installer_type, pwd, benchmark, benchmark_details,
+                         proxy_info, env_setup, benchmark_test_case)
diff --git a/utils/cli.py b/utils/cli.py
new file mode 100644
index 00000000..def70061
--- /dev/null
+++ b/utils/cli.py
@@ -0,0 +1,76 @@
+##############################################################################
+# Copyright (c) 2015 Dell Inc and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import sys
+import os
+import args_handler
+import argparse
+import logger_utils
+
+logger = logger_utils.QtipLogger('cli').get
+
+
+class Cli:
+
+    @staticmethod
+    def _parse_args(args):
+        parser = argparse.ArgumentParser()
+        parser.add_argument('-l ', '--lab', required=True, help='Name of Lab '
+                            'on which being tested, These can'
+                            'be found in the test_plan/ directory. Please '
+                            'ensure that you have edited the respective files '
+                            'before using them. For testing other than through Jenkins'
+                            ' The user should list default after -l . all the fields in'
+                            ' the files are necessary and should be filled')
+        parser.add_argument('-f', '--file', required=True, help='File in '
+                            'benchmarks/suite/ with the list of tests. there are three files'
+                            '\n compute '
+                            '\n storage '
+                            '\n network '
+                            'They contain all the tests that will be run. They are listed by suite.'
+                            'Please ensure there are no empty lines')
+        parser.add_argument('-b', '--benchmark', help='Name of the benchmark.'
+                            'Can be found in benchmarks/suite/file_name')
+
+        return parser.parse_args(args)
+
+    def __init__(self, args=sys.argv[1:]):
+
+        args = self._parse_args(args)
+        if not args_handler.check_suite(args.file):
+            logger.error("ERROR: This suite file %s doesn't exist under benchmarks/suite/.\
+                         Please enter correct file." % str(args.file))
+            sys.exit(1)
+
+        if not args_handler.check_lab_name(args.lab):
+            logger.error("You have specified a lab that is not present under test_plan/.\
+                         Please enter correct file. If unsure how to proceed, use -l default.")
+            sys.exit(1)
+        suite = args.file
+        benchmarks = args_handler.get_files_in_suite(suite)
+        test_cases = args_handler.get_files_in_test_plan(args.lab, suite)
+        benchmarks_list = filter(lambda x: x in test_cases, benchmarks)
+
+        if args.benchmark:
+            if not args_handler.check_benchmark_name(args.lab, args.file, args.benchmark):
+                logger.error("You have specified an incorrect benchmark.\
+                             Please enter the correct one.")
+                sys.exit(1)
+            else:
+                logger.info("Starting with " + args.benchmark)
+                args_handler.prepare_and_run_benchmark(
+                    os.environ['INSTALLER_TYPE'], os.environ['PWD'],
+                    args_handler.get_benchmark_path(args.lab.lower(), args.file, args.benchmark))
+        else:
+            map(lambda x: args_handler.prepare_and_run_benchmark(
+                os.environ['INSTALLER_TYPE'], os.environ['PWD'],
+                args_handler.get_benchmark_path(args.lab.lower(), suite, x)), benchmarks_list)
+
+            logger.info("{0} is not a Template in the Directory Enter a Valid file name.\
+                        or use qtip.py -h for list".format(filter(lambda x: x not in test_cases, benchmarks)))
diff --git a/utils/create_zones.py b/utils/create_zones.py
new file mode 100644
index 00000000..5e378c83
--- /dev/null
+++ b/utils/create_zones.py
@@ -0,0 +1,86 @@
+##############################################################################
+# Copyright (c) 2016 Dell Inc, ZTE and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from keystoneclient.auth.identity import v2
+from keystoneclient import session
+from novaclient import client
+import os
+import random
+import logger_utils
+
+logger = logger_utils.QtipLogger('create_zones').get
+
+
+class AvailabilityZone:
+
+ def __init__(self):
+ self._keystone_client = None
+ self._nova_client = None
+
+ def _get_keystone_client(self):
+ """returns a keystone client instance"""
+
+ if self._keystone_client is None:
+ '''
+ self._keystone_client = keystoneclient.v2_0.client.Client(
+ auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'))
+ '''
+ auth = v2.Password(auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'))
+
+ sess = session.Session(auth=auth)
+ else:
+ return self._keystone_client
+
+ return sess
+
+ def _get_nova_client(self):
+ if self._nova_client is None:
+ keystone = self._get_keystone_client()
+ self._nova_client = client.Client('2', session=keystone)
+ return self._nova_client
+
+ def clean_all_aggregates(self):
+ logger.info("clean all aggregates")
+ nova = self._get_nova_client()
+ agg_list = nova.aggregates.list()
+
+ for agg in agg_list:
+ agg_info = nova.aggregates.get_details(agg.id)
+ agg_hosts = agg_info.hosts
+ if len(agg_hosts):
+ for host in agg_hosts:
+ nova.aggregates.remove_host(agg.id, host)
+ nova.aggregates.delete(agg.id)
+
+ def create_aggs(self, args):
+ azone_list = list(set(args))
+ azone_list.sort()
+
+ nova = self._get_nova_client()
+ hyper_list = nova.hypervisors.list()
+
+ if len(azone_list) > len(hyper_list):
+ logger.error("required available zones > compute nodes")
+ return None
+
+ compute_nodes = map(lambda x: x.service['host'], hyper_list)
+ sample_nodes = random.sample(compute_nodes, len(azone_list))
+ sample_nodes.sort()
+
+ for index, item in enumerate(azone_list):
+ logger.info("create aggregates: %s" % str(item))
+ agg_id = nova.aggregates.create(item, item)
+
+ logger.info("add host: %s" % sample_nodes[index])
+ nova.aggregates.add_host(aggregate=agg_id, host=sample_nodes[index])
diff --git a/utils/driver.py b/utils/driver.py
new file mode 100644
index 00000000..9894e0f5
--- /dev/null
+++ b/utils/driver.py
@@ -0,0 +1,92 @@
+##############################################################################
+# Copyright (c) 2015 Dell Inc, ZTE and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logger_utils
+from operator import add
+from ansible_api import AnsibleApi
+from os.path import expanduser
+
+logger = logger_utils.QtipLogger('driver').get
+
+
+class Driver:
+
+    def __init__(self):
+
+        logger.info("Class driver initialized\n")
+        self.installer_username = {'fuel': 'root',
+                                   'joid': 'ubuntu',
+                                   'apex': 'heat-admin'}
+
+    @staticmethod
+    def merge_two_dicts(x, y):
+        '''
+        It is from http://stackoverflow.com/questions/38987/
+        how-can-i-merge-two-python-dictionaries-in-a-single-expression
+        '''
+        z = x.copy()
+        z.update(y)
+        return z
+
+    def get_common_var_json(self, installer_type, pwd, benchmark_fname,
+                            benchmark_detail, pip_dict, proxy_info):
+        common_json = {'Dest_dir': expanduser('~') + '/qtip/results',
+                       'ip1': '',
+                       'ip2': '',
+                       'installer': str(installer_type),
+                       'workingdir': str(pwd),
+                       'fname': str(benchmark_fname),
+                       'username': self.installer_username[str(installer_type)]}
+        common_json.update(benchmark_detail) if benchmark_detail else None
+        common_json.update(proxy_info) if proxy_info else None
+        return common_json
+
+    def get_special_var_json(self, role, roles, benchmark_detail, pip_dict):
+        special_json = {}
+        index = roles.index(role) + 1
+        private_ip = pip_dict[0][1] if pip_dict[0][1][0] else 'NONE'
+        map(lambda x: special_json.update({'ip' + str(index): x}), role[1])\
+            if benchmark_detail and (role[0] == '1-server') else None
+        map(lambda x: special_json.update({'privateip' + str(index): private_ip}), role[1])\
+            if benchmark_detail and (role[0] == '1-server') else None
+        special_json = self.get_special_var_json(filter(lambda x: x[0] == '1-server', roles)[0],
+                                                 roles,
+                                                 benchmark_detail,
+                                                 pip_dict) if role[0] == '2-host' else special_json
+        special_json.update({'role': role[0]})
+        return special_json
+
+    def run_ansible_playbook(self, benchmark, extra_vars):
+        logger.info(extra_vars)
+        ansible_api = AnsibleApi()
+        ansible_api.execute_playbook('./config/hosts',
+                                     './benchmarks/perftest/{0}.yaml'.format(benchmark),
+                                     './config/QtipKey', extra_vars)
+        return self.get_ansible_result(extra_vars['role'], ansible_api.get_detail_playbook_stats())
+
+    def drive_bench(self, installer_type, pwd, benchmark, roles, benchmark_fname,
+                    benchmark_detail=None, pip_dict=None, proxy_info=None):
+        roles = sorted(roles)
+        pip_dict = sorted(pip_dict)
+        var_json = self.get_common_var_json(installer_type, pwd, benchmark_fname,
+                                            benchmark_detail, pip_dict, proxy_info)
+        result = map(lambda role: self.run_ansible_playbook
+                     (benchmark, self.merge_two_dicts(var_json,
+                                                      self.get_special_var_json(role, roles,
+                                                                                benchmark_detail,
+                                                                                pip_dict))), roles)
+        return reduce(self._merge_ansible_result, result)
+
+    def get_ansible_result(self, role, stats):
+        result = reduce(add, map(lambda x: x[1]['failures'] + x[1]['unreachable'], stats))
+        return {'result': result,
+                'detail': {role: stats}}
+
+    def _merge_ansible_result(self, result_1, result_2):
+        return {'result': result_1['result'] +
+                result_2['result'],
+                'detail': self.merge_two_dicts(result_1['detail'], result_2['detail'])}
diff --git a/utils/env_setup.py b/utils/env_setup.py
new file mode 100644
index 00000000..7bbedfcf
--- /dev/null
+++ b/utils/env_setup.py
@@ -0,0 +1,214 @@
+##############################################################################
+# Copyright (c) 2016 Dell Inc, ZTE and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import random
+import socket
+import sys
+import time
+from collections import defaultdict
+from os.path import expanduser
+import paramiko
+import yaml
+import logger_utils
+
+logger = logger_utils.QtipLogger('env_setup').get
+
+
+class Env_setup:
+
+    roles_ip_list = []  # ROLE and its corresponding IP address list
+    ip_pw_list = []  # IP and password, this will be used to ssh
+    roles_dict = defaultdict(list)
+    ip_pw_dict = defaultdict(list)
+    ip_pip_list = []
+    vm_parameters = defaultdict(list)
+    benchmark_details = defaultdict()
+    benchmark = ''
+
+    def __init__(self):
+        print '\nParsing class initiated\n'
+        self.roles_ip_list[:] = []
+        self.ip_pw_list[:] = []
+        self.roles_dict.clear()
+        self.ip_pw_dict.clear()
+        self.ip_pip_list[:] = []
+        self.proxy_info = {}
+        self.vm_parameters.clear()
+        self.benchmark_details.clear()
+        self.benchmark = ''
+
+    @staticmethod
+    def write_to_file(role):
+        f_name_2 = open('./config/hosts', 'w')
+        print role.items()
+        for k in role:
+            f_name_2.write('[' + k + ']\n')
+            num = len(role[k])
+            for x in range(num):
+                f_name_2.write(role[k][x] + '\n')
+        f_name_2.close()
+
+    @staticmethod
+    def ssh_test(hosts):
+        for ip, pw in hosts:
+            logger.info('Beginning SSH Test: %s \n' % ip)
+            os.system('ssh-keyscan %s >> /root/.ssh/known_hosts' % ip)
+            time.sleep(2)
+
+            ssh_cmd = './scripts/qtip_creds.sh %s' % ip
+            logger.info("run command: %s " % ssh_cmd)
+            os.system(ssh_cmd)
+
+            ssh = paramiko.SSHClient()
+            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+            ssh.connect(ip, key_filename='./config/QtipKey')
+
+            for attempts in range(100):
+                try:
+                    stdin, stdout, stderr = ssh.exec_command('uname')
+                    if not stderr.readlines():
+                        logger.info('SSH successful')
+                        break
+                except socket.error:
+                    logger.error('SSH is still unavailable, retry!')
+                    time.sleep(2)
+                if attempts == 99:
+                    logger.error("Try 99 times, SSH failed: %s" % ip)
+
+    @staticmethod
+    def ping_test(lister, attempts=30):
+        for k, v in lister.iteritems():
+            time.sleep(10)
+            for val in v:
+                ipvar = val
+                ping_cmd = 'ping -D -c1 {0}'.format(ipvar)
+                for i in range(attempts):
+                    if os.system(ping_cmd) != 0:
+                        print '\nWaiting for machine\n'
+                        time.sleep(10)
+                    else:
+                        break
+                print ('\n\n %s is UP \n\n ' % ipvar)
+
+    @staticmethod
+    def fetch_compute_ips():
+        logger.info("Fetch compute ips through installer")
+        ips = []
+
+        installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+        installer_ip = str(os.environ['INSTALLER_IP'])
+        if installer_type not in ["fuel", "compass"]:
+            raise RuntimeError("%s is not supported" % installer_type)
+        if not installer_ip:
+            raise RuntimeError("undefine environment variable INSTALLER_IP")
+
+        cmd = "bash ./scripts/fetch_compute_ips.sh -i %s -a %s" % \
+              (installer_type, installer_ip)
+        logger.info(cmd)
+        os.system(cmd)
+        with open(expanduser('~') + "/qtip/ips.log", "r") as file:
+            data = file.read()
+            if data:
+                ips.extend(data.rstrip('\n').split('\n'))
+        logger.info("All compute ips: %s" % ips)
+        return ips
+
+    def check_machine_ips(self, host_tag):
+        logger.info("Check machine ips")
+        ips = self.fetch_compute_ips()
+        ips_num = len(ips)
+        num = len(host_tag)
+        if num > ips_num:
+            err = "host num %s > compute ips num %s" % (num, ips_num)
+            raise RuntimeError(err)
+
+        for x in range(num):
+            hostlabel = 'machine_' + str(x + 1)
+            if host_tag[hostlabel]['ip']:
+                if host_tag[hostlabel]['ip'] in ips:
+                    info = "%s's ip %s is defined by test case yaml file" % \
+                           (hostlabel, host_tag[hostlabel]['ip'])
+                    logger.info(info)
+                else:
+                    err = "%s is not in %s" % (host_tag[hostlabel]['ip'], ips)
+                    raise RuntimeError(err)
+            else:
+                host_tag[hostlabel]['ip'] = random.choice(ips)
+                info = "assign ip %s to %s" % (host_tag[hostlabel]['ip'], hostlabel)
+            ips.remove(host_tag[hostlabel]['ip'])
+
+    def get_host_machine_info(self, host_tag):
+        num = len(host_tag)
+        offset = len(self.roles_ip_list)
+        self.check_machine_ips(host_tag)
+        for x in range(num):
+            hostlabel = 'machine_' + str(x + 1)
+            self.roles_ip_list.insert(
+                offset, (host_tag[hostlabel]['role'], host_tag[hostlabel]['ip']))
+            self.ip_pw_list.insert(
+                offset, (host_tag[hostlabel]['ip'], host_tag[hostlabel]['pw']))
+
+    def get_virtual_machine_info(self, virtual_tag):
+
+        num = len(virtual_tag)
+        for x in range(num):
+            host_label = 'virtualmachine_' + str(x + 1)
+            for k, v in virtual_tag[host_label].iteritems():
+                self.vm_parameters[k].append(v)
+
+    def get_bench_mark_details(self, detail_dic):
+
+        print detail_dic
+        for k, v in detail_dic.items():
+            self.benchmark_details[k] = v
+
+    def parse(self, config_file_path):
+        try:
+            f_name = open(config_file_path, 'r+')
+            doc = yaml.safe_load(f_name)
+            f_name.close()
+            if doc['Scenario']['benchmark']:
+                self.benchmark = doc['Scenario']['benchmark']
+            if doc['Context']['Virtual_Machines']:
+                self.get_virtual_machine_info(doc['Context']['Virtual_Machines'])
+            if doc['Context']['Host_Machines']:
+                self.get_host_machine_info(doc['Context']['Host_Machines'])
+            if doc.get('Scenario', {}).get('benchmark_details', {}):
+                self.get_bench_mark_details(doc.get('Scenario', {}).get('benchmark_details', {}))
+            if 'Proxy_Environment' in doc['Context'].keys():
+                self.proxy_info['http_proxy'] = doc['Context']['Proxy_Environment']['http_proxy']
+                self.proxy_info['https_proxy'] = doc['Context']['Proxy_Environment']['https_proxy']
+                self.proxy_info['no_proxy'] = doc['Context']['Proxy_Environment']['no_proxy']
+            for k, v in self.roles_ip_list:
+                self.roles_dict[k].append(v)
+            for k, v in self.ip_pw_list:
+                self.ip_pw_dict[k].append(v)
+            return (
+                self.benchmark,
+                self.vm_parameters,
+                self.benchmark_details.items(),
+                self.proxy_info)
+        except KeyboardInterrupt:
+            print 'ConfigFile Closed: exiting!'
+            sys.exit(0)
+
+    def update_ansible(self):
+        self.write_to_file(self.roles_dict)
+
+    def call_ping_test(self):
+        self.ping_test(self.roles_dict)
+
+    def call_ssh_test(self):
+        self.ssh_test(self.ip_pw_list)
+
+    def cleanup_authorized_keys(self):
+        for ip, pw in self.ip_pw_list:
+            cmd = './scripts/cleanup_creds.sh %s' % ip
+            logger.info("cleanup authorized_keys: %s " % cmd)
+            os.system(cmd)
diff --git a/utils/spawn_vm.py b/utils/spawn_vm.py
new file mode 100644
index 00000000..f38c9a3a
--- /dev/null
+++ b/utils/spawn_vm.py
@@ -0,0 +1,206 @@
+##############################################################################
+# Copyright (c) 2016 Dell Inc, ZTE and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import sys
+import yaml
+import heatclient.client
+import keystoneclient
+import time
+from env_setup import Env_setup
+from create_zones import AvailabilityZone
+import logger_utils
+
+logger = logger_utils.QtipLogger('spawn_vm').get
+
+
+class SpawnVM(Env_setup):
+
+ def __init__(self, vm_info):
+ logger.info('vm_info: %s' % vm_info)
+ vm_role_ip_dict = vm_info.copy()
+ self._keystone_client = None
+ self._heat_client = None
+ self._glance_client = None
+ self._nova_client = None
+ self.azone = AvailabilityZone()
+ # TODO: it should clean up aggregates and stack after test case finished.
+ self.azone.clean_all_aggregates()
+ self.azone.create_aggs(vm_info['availability_zone'])
+ self.heat_template = self.generate_heat_template(vm_info)
+ self.create_stack(vm_role_ip_dict)
+
+ @staticmethod
+ def get_public_network():
+
+ """
+ TODO: GET THE NAMES OF THE PUBLIC NETWORKS for OTHER PROJECTS
+ """
+ installer = os.environ['INSTALLER_TYPE']
+
+ if installer.lower() == 'fuel':
+ return 'admin_floating_net'
+ if installer.lower() == 'apex':
+ return 'external'
+ if installer.lower() == 'compass':
+ return 'ext-net'
+ if installer.lower() == 'joid':
+ return 'ext-net'
+
+ def generate_heat_template(self, vm_params):
+ logger.info('Generating Heat Template')
+ heat_dict = {}
+ try:
+ with open('./config/SampleHeat.yaml', 'r+') as H_temp:
+ heat_dict = yaml.safe_load(H_temp)
+ except yaml.YAMLError as exc:
+ if hasattr(exc, 'problem_mark'):
+ mark = exc.problem_mark
+ logger.error(
+ 'Error in qtip/config/SampleHeat.yaml at: (%s,%s)' % (mark.line + 1,
+ mark.column + 1))
+ logger.error('EXITING PROGRAM. Correct File and restart')
+ sys.exit(1)
+
+ fopen = open('./config/QtipKey.pub', 'r')
+ fopenstr = fopen.read()
+ fopenstr = fopenstr.rstrip()
+ scriptcmd = '#!/bin/bash \n echo {0} >> foo.txt \n echo {1} >> /root/.ssh/authorized_keys'.format(
+ fopenstr, fopenstr)
+
+ netName = self.get_public_network()
+ heat_dict['heat_template_version'] = '2015-04-30'
+
+ heat_dict['parameters']['public_network'] = {
+ 'type': 'string',
+ 'default': netName
+ }
+
+ for x in range(1, len(vm_params['availability_zone']) + 1):
+ avail_zone = vm_params['availability_zone'][x - 1]
+
+ heat_dict['parameters']['availability_zone_' + str(x)] = \
+ {'description': 'Availability Zone of the instance',
+ 'default': avail_zone,
+ 'type': 'string'}
+
+ heat_dict['resources']['public_port_' + str(x)] = \
+ {'type': 'OS::Neutron::Port',
+ 'properties': {'network': {'get_resource': 'network'},
+ 'security_groups': [{'get_resource': 'security_group'}],
+ 'fixed_ips': [{'subnet_id': {'get_resource': 'subnet'}}]}}
+
+ heat_dict['resources']['floating_ip_' + str(x)] = {
+ 'type': 'OS::Neutron::FloatingIP',
+ 'properties': {'floating_network': {'get_param': 'external_net_name'}}}
+
+ heat_dict['resources']['floating_ip_assoc_' + str(x)] = {
+ 'type': 'OS::Neutron::FloatingIPAssociation',
+ 'properties': {
+ 'floatingip_id': {'get_resource': 'floating_ip_' + str(x)},
+ 'port_id': {'get_resource': 'public_port_' + str(x)}}}
+
+ heat_dict['resources']['my_instance_' + str(x)] = \
+ {'type': 'OS::Nova::Server',
+ 'properties': {'image': {'get_param': 'image'},
+ 'networks':
+ [{'port': {'get_resource': 'public_port_' + str(x)}}],
+ 'flavor': {'get_resource': 'flavor'},
+ 'availability_zone': avail_zone,
+ 'security_groups': [{'get_resource': 'security_group'}],
+ 'name': 'instance' + str(x),
+ 'user_data_format': 'RAW',
+ 'user_data': scriptcmd}}
+
+ heat_dict['outputs']['instance_PIP_' + str(x)] = {
+ 'description': 'IP address of the instance',
+ 'value': {'get_attr': ['my_instance_' + str(x), 'first_address']}}
+
+ heat_dict['outputs']['instance_ip_' + str(x)] = {
+ 'description': 'IP address of the instance',
+ 'value': {'get_attr': ['floating_ip_' + str(x), 'floating_ip_address']}}
+
+ heat_dict['outputs']['availability_instance_' + str(x)] = {
+ 'description': 'Availability Zone of the Instance',
+ 'value': {'get_param': 'availability_zone_' + str(x)}}
+
+ del heat_dict['outputs']['description']
+ logger.info(heat_dict)
+
+ return heat_dict
+
+ def _get_keystone_client(self):
+ """returns a keystone client instance"""
+
+ if self._keystone_client is None:
+ self._keystone_client = keystoneclient.v2_0.client.Client(
+ auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'))
+ return self._keystone_client
+
+ def _get_heat_client(self):
+ """returns a heat client instance"""
+ if self._heat_client is None:
+ keystone = self._get_keystone_client()
+ heat_endpoint = keystone.service_catalog.url_for(
+ service_type='orchestration')
+ self._heat_client = heatclient.client.Client(
+ '1', endpoint=heat_endpoint, token=keystone.auth_token)
+ return self._heat_client
+
+ def create_stack(self, vm_role_ip_dict):
+ stackname = 'QTIP'
+ heat = self._get_heat_client()
+
+ self.delete_stack(stackname)
+
+ logger.info('Start to create stack %s' % stackname)
+ heat.stacks.create(stack_name=stackname, template=self.heat_template)
+
+ stack_status = "IN_PROGRESS"
+ while stack_status != 'COMPLETE':
+ if stack_status == 'IN_PROGRESS':
+ logger.debug('Create in Progress')
+ if stack_status == 'CREATE_FAILED':
+ raise RuntimeError("Stack %s created failed!" % stackname)
+ stack_status = heat.stacks.get(stackname).status
+ time.sleep(15)
+ logger.info('Stack %s Created Complete!' % stackname)
+
+ stack_outputs = heat.stacks.get(stackname).outputs
+
+ for vm in range(len(vm_role_ip_dict['OS_image'])):
+ for i in stack_outputs:
+ instanceKey = "instance_ip_" + str(vm + 1)
+ privateIPkey = 'instance_PIP_' + str(vm + 1)
+ if i['output_key'] == instanceKey:
+ Env_setup.roles_dict[vm_role_ip_dict['role'][vm]] \
+ .append(str(i['output_value']))
+ Env_setup.ip_pw_list.append((str(i['output_value']), ''))
+
+ if i['output_key'] == privateIPkey:
+ Env_setup.ip_pw_dict[vm_role_ip_dict['role'][vm]] = str(i['output_value'])
+
+ logger.info('Getting Public IP(s): %s' % Env_setup.ip_pw_list)
+
+ def delete_stack(self, stack_name):
+ heat = self._get_heat_client()
+
+ stacks = heat.stacks.list()
+ exists = map(lambda x: x.stack_name, stacks)
+ if stack_name in exists:
+ logger.info("Delete stack %s" % stack_name)
+ heat.stacks.delete(stack_name)
+ while stack_name in exists:
+ time.sleep(10)
+ stacks = heat.stacks.list()
+ exists = map(lambda x: x.stack_name, stacks)
+ logger.debug("exists_stacks: %s" % exists)
+ logger.info("%s doesn't exist" % stack_name)