author    | Yujun Zhang <zhang.yujunz@zte.com.cn> | 2017-01-08 09:48:31 +0800
committer | Yujun Zhang <zhang.yujunz@zte.com.cn> | 2017-01-11 15:14:16 +0800
commit    | 23b627df622eeafafa215ce19764310c1d55dd55 (patch)
tree      | d92c4c9b8a50a03461a40d2a2aba67bc7312041a /legacy/utils
parent    | 23f48e46a46976ae6f6d97aea11440e6a8b63121 (diff)
Reorganize the project folders
Code from Brahmaputra is no longer maintained, including:
- docker
- playbooks
- scripts
- utils
They have been moved to the legacy folder to avoid unnecessary confusion for new developers.
Change-Id: Ia50383ca5c3bd82571eb7b2184e7f83e264ff8a7
Signed-off-by: Yujun Zhang <zhang.yujunz@zte.com.cn>
Diffstat (limited to 'legacy/utils')
23 files changed, 1337 insertions, 0 deletions
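For reference, the relocated CLI keeps its argparse interface. A minimal sketch of exercising it, assuming the repository root is on the import path (a legacy/__init__.py may be needed) and using the 'default' lab and 'compute' suite names from the help text in this diff; the benchmark file name is purely illustrative:

```python
# Illustrative only: exercising the argument parser of the relocated CLI
# (legacy/utils/cli.py in this diff). The benchmark file name is hypothetical.
from legacy.utils.cli import Cli

args = Cli._parse_args(['-l', 'default', '-f', 'compute', '-b', 'dhrystone_bm.yaml'])
print(args)  # Namespace(benchmark='dhrystone_bm.yaml', file='compute', lab='default')
```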
diff --git a/legacy/utils/__init__.py b/legacy/utils/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/legacy/utils/__init__.py diff --git a/legacy/utils/ansible_api.py b/legacy/utils/ansible_api.py new file mode 100644 index 00000000..9e1d249e --- /dev/null +++ b/legacy/utils/ansible_api.py @@ -0,0 +1,65 @@ +############################################################################## +# Copyright (c) 2016 ZTE Corp and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +import os +from collections import namedtuple +from ansible.executor.playbook_executor import PlaybookExecutor +from ansible.inventory import Inventory +from ansible.parsing.dataloader import DataLoader +from ansible.vars import VariableManager +import logger_utils + +logger = logger_utils.QtipLogger('ansible_api').get + + +class AnsibleApi: + + def __init__(self): + self.variable_manager = VariableManager() + self.loader = DataLoader() + self.passwords = {} + self.pbex = None + + def _check_path(self, file_path): + if not os.path.exists(file_path): + logger.error('The playbook %s does not exist' % file_path) + return False + else: + return True + + def execute_playbook(self, hosts_file, playbook_path, pub_key_file, vars): + if not self._check_path(hosts_file): + return False + + inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager, + host_list=hosts_file) + Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', + 'connection', 'module_path', 'forks', 'remote_user', + 'private_key_file', 'ssh_common_args', 'ssh_extra_args', + 'sftp_extra_args', 'scp_extra_args', 'become', + 'become_method', 'become_user', 'verbosity', 'check']) + options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False, + connection='ssh', module_path=None, forks=100, remote_user='root', + private_key_file=pub_key_file, ssh_common_args=None, + ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, + become=True, become_method=None, become_user='root', verbosity=None, + check=False) + self.variable_manager.extra_vars = vars + + self.pbex = PlaybookExecutor(playbooks=[playbook_path], inventory=inventory, + variable_manager=self.variable_manager, loader=self.loader, + options=options, passwords=self.passwords) + + return self.pbex.run() + + def get_detail_playbook_stats(self): + if self.pbex: + stats = self.pbex._tqm._stats + return map(lambda x: (x, stats.summarize(x)), stats.processed.keys()) + else: + return None diff --git a/legacy/utils/args_handler.py b/legacy/utils/args_handler.py new file mode 100644 index 00000000..993b1035 --- /dev/null +++ b/legacy/utils/args_handler.py @@ -0,0 +1,73 @@ +############################################################################## +# Copyright (c) 2016 ZTE Corp and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +import os +from operator import add +import simplejson as json +from env_setup import Env_setup +from spawn_vm import SpawnVM +from driver import Driver + + +def get_files_in_suite(suite_name, case_type='all'): + benchmark_list = json.load(file('benchmarks/suite/{0}'.format(suite_name))) + return reduce(add, benchmark_list.values()) \ + if case_type == 'all' else benchmark_list[case_type] + + +def get_files_in_test_plan(lab, suite_name, case_type='all'): + test_case_all = os.listdir('benchmarks/testplan/{0}/{1}'.format(lab, suite_name)) + return test_case_all if case_type == 'all' else \ + filter(lambda x: case_type in x, test_case_all) + + +def get_benchmark_path(lab, suit, benchmark): + return 'benchmarks/testplan/{0}/{1}/{2}'.format(lab, suit, benchmark) + + +def check_suite(suite_name): + return True if os.path.isfile('benchmarks/suite/' + suite_name) else False + + +def check_lab_name(lab_name): + return True if os.path.isdir('benchmarks/testplan/' + lab_name) else False + + +def check_benchmark_name(lab, file, benchmark): + return os.path.isfile('benchmarks/testplan/' + lab + '/' + file + '/' + benchmark) + + +def _get_f_name(test_case_path): + return test_case_path.split('/')[-1] + + +def prepare_ansible_env(benchmark_test_case): + env_setup = Env_setup() + [benchmark, vm_info, benchmark_details, proxy_info] = env_setup.parse(benchmark_test_case) + SpawnVM(vm_info) if len(vm_info) else None + env_setup.call_ping_test() + env_setup.call_ssh_test() + env_setup.update_ansible() + return benchmark, benchmark_details, proxy_info, env_setup + + +def run_benchmark(installer_type, pwd, benchmark, benchmark_details, + proxy_info, env_setup, benchmark_test_case): + driver = Driver() + result = driver.drive_bench(installer_type, pwd, benchmark, + env_setup.roles_dict.items(), + _get_f_name(benchmark_test_case), + benchmark_details, env_setup.ip_pw_dict.items(), proxy_info) + env_setup.cleanup_authorized_keys() + return result + + +def prepare_and_run_benchmark(installer_type, pwd, benchmark_test_case): + benchmark, benchmark_details, proxy_info, env_setup = prepare_ansible_env(benchmark_test_case) + return run_benchmark(installer_type, pwd, benchmark, benchmark_details, + proxy_info, env_setup, benchmark_test_case) diff --git a/legacy/utils/cli.py b/legacy/utils/cli.py new file mode 100644 index 00000000..5e566f27 --- /dev/null +++ b/legacy/utils/cli.py @@ -0,0 +1,76 @@ +############################################################################## +# Copyright (c) 2015 Dell Inc and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +import sys +import os +import args_handler +import argparse +import logger_utils + +logger = logger_utils.QtipLogger('cli').get + + +class Cli: + + @staticmethod + def _parse_args(args): + parser = argparse.ArgumentParser() + parser.add_argument('-l ', '--lab', required=True, help='Name of Lab ' + 'on which being tested, These can' + 'be found in the benchmarks/testplan/ directory. 
Please ' + 'ensure that you have edited the respective files ' + 'before using them. For testing other than through Jenkins' + ' The user should list default after -l . all the fields in' + ' the files are necessary and should be filled') + parser.add_argument('-f', '--file', required=True, help='File in ' + 'benchmarks/suite/ with the list of tests. there are three files' + '\n compute ' + '\n storage ' + '\n network ' + 'They contain all the tests that will be run. They are listed by suite.' + 'Please ensure there are no empty lines') + parser.add_argument('-b', '--benchmark', help='Name of the benchmark.' + 'Can be found in benchmarks/suite/file_name') + + return parser.parse_args(args) + + def __init__(self, args=sys.argv[1:]): + + args = self._parse_args(args) + if not args_handler.check_suite(args.file): + logger.error("ERROR: This suite file %s doesn't exist under benchmarks/suite/.\ + Please enter correct file." % str(args.file)) + sys.exit(1) + + if not args_handler.check_lab_name(args.lab): + logger.error("You have specified a lab that is not present under benchmarks/testplan/.\ + Please enter correct file. If unsure how to proceed, use -l default.") + sys.exit(1) + suite = args.file + benchmarks = args_handler.get_files_in_suite(suite) + test_cases = args_handler.get_files_in_test_plan(args.lab, suite) + benchmarks_list = filter(lambda x: x in test_cases, benchmarks) + + if args.benchmark: + if not args_handler.check_benchmark_name(args.lab, args.file, args.benchmark): + logger.error("You have specified an incorrect benchmark.\ + Please enter the correct one.") + sys.exit(1) + else: + logger.info("Starting with " + args.benchmark) + args_handler.prepare_and_run_benchmark( + os.environ['INSTALLER_TYPE'], os.environ['PWD'], + args_handler.get_benchmark_path(args.lab.lower(), args.file, args.benchmark)) + else: + map(lambda x: args_handler.prepare_and_run_benchmark( + os.environ['INSTALLER_TYPE'], os.environ['PWD'], + args_handler.get_benchmark_path(args.lab.lower(), suite, x)), benchmarks_list) + + logger.info("{0} is not a Template in the Directory Enter a Valid file name.\ + or use qtip.py -h for list".format(filter(lambda x: x not in test_cases, benchmarks))) diff --git a/legacy/utils/create_zones.py b/legacy/utils/create_zones.py new file mode 100644 index 00000000..5e378c83 --- /dev/null +++ b/legacy/utils/create_zones.py @@ -0,0 +1,86 @@ +##############################################################################
+# Copyright (c) 2016 Dell Inc, ZTE and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from keystoneclient.auth.identity import v2
+from keystoneclient import session
+from novaclient import client
+import os
+import random
+import logger_utils
+
+logger = logger_utils.QtipLogger('create_zones').get
+
+
+class AvailabilityZone:
+
+ def __init__(self):
+ self._keystone_client = None
+ self._nova_client = None
+
+ def _get_keystone_client(self):
+ """returns a cached keystone session instance"""
+
+ if self._keystone_client is None:
+ auth = v2.Password(auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'))
+ # cache the authenticated session so repeated calls reuse it
+ self._keystone_client = session.Session(auth=auth)
+
+ return self._keystone_client
+
+ def _get_nova_client(self):
+ if self._nova_client is None:
+ keystone = self._get_keystone_client()
+ self._nova_client = client.Client('2', session=keystone)
+ return self._nova_client
+
+ def clean_all_aggregates(self):
+ logger.info("clean all aggregates")
+ nova = self._get_nova_client()
+ agg_list = nova.aggregates.list()
+
+ for agg in agg_list:
+ agg_info = nova.aggregates.get_details(agg.id)
+ agg_hosts = agg_info.hosts
+ if len(agg_hosts):
+ for host in agg_hosts:
+ nova.aggregates.remove_host(agg.id, host)
+ nova.aggregates.delete(agg.id)
+
+ def create_aggs(self, args):
+ azone_list = list(set(args))
+ azone_list.sort()
+
+ nova = self._get_nova_client()
+ hyper_list = nova.hypervisors.list()
+
+ if len(azone_list) > len(hyper_list):
+ logger.error("required available zones > compute nodes")
+ return None
+
+ compute_nodes = map(lambda x: x.service['host'], hyper_list)
+ sample_nodes = random.sample(compute_nodes, len(azone_list))
+ sample_nodes.sort()
+
+ for index, item in enumerate(azone_list):
+ logger.info("create aggregates: %s" % str(item))
+ agg_id = nova.aggregates.create(item, item)
+
+ logger.info("add host: %s" % sample_nodes[index])
+ nova.aggregates.add_host(aggregate=agg_id, host=sample_nodes[index])
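For context, a minimal sketch of how this helper is driven; SpawnVM.__init__ later in this diff performs the same clean-then-create sequence. It assumes OS_AUTH_URL, OS_USERNAME, OS_PASSWORD and OS_TENANT_NAME are exported for the target cloud, and the zone names are illustrative only:

```python
# Minimal usage sketch for AvailabilityZone, mirroring SpawnVM.__init__ below.
# Assumes the standard OS_* credential variables are exported; zone names are
# illustrative only.
from create_zones import AvailabilityZone

azone = AvailabilityZone()
azone.clean_all_aggregates()             # remove aggregates left over from earlier runs
azone.create_aggs(['zone_1', 'zone_2'])  # one aggregate per unique zone name,
                                         # each backed by a randomly picked compute node
```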
diff --git a/legacy/utils/dashboard/__init__.py b/legacy/utils/dashboard/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/legacy/utils/dashboard/__init__.py diff --git a/legacy/utils/dashboard/pushtoDB.py b/legacy/utils/dashboard/pushtoDB.py new file mode 100644 index 00000000..427d39c4 --- /dev/null +++ b/legacy/utils/dashboard/pushtoDB.py @@ -0,0 +1,74 @@ +import requests +import json +import datetime +import os +import sys +from qtip.utils import logger_utils + +logger = logger_utils.QtipLogger('push_db').get + +TEST_DB = 'http://testresults.opnfv.org/test/api/v1' + +suite_list = [('compute_result.json', 'compute_test_suite'), + ('network_result.json', 'network_test_suite'), + ('storage_result.json', 'storage_test_suite')] +payload_list = {} + + +def push_results_to_db(db_url, case_name, payload, installer, pod_name): + + url = db_url + "/results" + creation_date = str(datetime.datetime.utcnow().isoformat()) + + params = {"project_name": "qtip", "case_name": case_name, + "pod_name": pod_name, "installer": installer, "start_date": creation_date, + "version": "test", "details": payload} + + headers = {'Content-Type': 'application/json'} + logger.info('pod_name:{0},installer:{1},creation_data:{2}'.format(pod_name, + installer, + creation_date)) + # temporary code, will be deleted after Bigergia dashboard is ready + try: + qtip_testapi_url = "http://testapi.qtip.openzero.net/results" + qtip_testapi_r = requests.post(qtip_testapi_url, data=json.dumps(params), headers=headers) + logger.info('Pushing Results to qtip_testapi: %s'.format(qtip_testapi_r)) + except: + logger.info("Pushing Results to qtip_testapi Error:{0}".format(sys.exc_info()[0])) + + try: + r = requests.post(url, data=json.dumps(params), headers=headers) + logger.info(r) + return True + except: + logger.info("Error:{0}".format(sys.exc_info()[0])) + return False + + +def populate_payload(suite_list): + + global payload_list + for k, v in suite_list: + + if os.path.isfile('results/' + str(k)): + payload_list[k] = v + + +def main(): + + global payload_list + populate_payload(suite_list) + if payload_list: + logger.info(payload_list) + for suite, case in payload_list.items(): + with open('results/' + suite, 'r') as result_file: + j = json.load(result_file) + push_results_to_db(TEST_DB, case, j, + os.environ['INSTALLER_TYPE'], + os.environ['NODE_NAME']) + elif not payload_list: + logger.info('Results not found') + + +if __name__ == "__main__": + main() diff --git a/legacy/utils/driver.py b/legacy/utils/driver.py new file mode 100644 index 00000000..9894e0f5 --- /dev/null +++ b/legacy/utils/driver.py @@ -0,0 +1,92 @@ +############################################################################## +# Copyright (c) 2015 Dell Inc, ZTE and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +import logger_utils +from operator import add +from ansible_api import AnsibleApi +from os.path import expanduser + +logger = logger_utils.QtipLogger('driver').get + + +class Driver: + + def __init__(self): + + logger.info("Class driver initialized\n") + self.installer_username = {'fuel': 'root', + 'joid': 'ubuntu', + 'apex': 'heat-admin'} + + @staticmethod + def merge_two_dicts(x, y): + ''' + It is from http://stackoverflow.com/questions/38987/ + how-can-i-merge-two-python-dictionaries-in-a-single-expression + ''' + z = x.copy() + z.update(y) + return z + + def get_common_var_json(self, installer_type, pwd, benchmark_fname, + benchmark_detail, pip_dict, proxy_info): + common_json = {'Dest_dir': expanduser('~') + '/qtip/results', + 'ip1': '', + 'ip2': '', + 'installer': str(installer_type), + 'workingdir': str(pwd), + 'fname': str(benchmark_fname), + 'username': self.installer_username[str(installer_type)]} + common_json.update(benchmark_detail) if benchmark_detail else None + common_json.update(proxy_info) if proxy_info else None + return common_json + + def get_special_var_json(self, role, roles, benchmark_detail, pip_dict): + special_json = {} + index = roles.index(role) + 1 + private_ip = pip_dict[0][1] if pip_dict[0][1][0] else 'NONE' + map(lambda x: special_json.update({'ip' + str(index): x}), role[1])\ + if benchmark_detail and (role[0] == '1-server') else None + map(lambda x: special_json.update({'privateip' + str(index): private_ip}), role[1])\ + if benchmark_detail and (role[0] == '1-server') else None + special_json = self.get_special_var_json(filter(lambda x: x[0] == '1-server', roles)[0], + roles, + benchmark_detail, + pip_dict) if role[0] == '2-host' else special_json + special_json.update({'role': role[0]}) + return special_json + + def run_ansible_playbook(self, benchmark, extra_vars): + logger.info(extra_vars) + ansible_api = AnsibleApi() + ansible_api.execute_playbook('./config/hosts', + './benchmarks/perftest/{0}.yaml'.format(benchmark), + './config/QtipKey', extra_vars) + return self.get_ansible_result(extra_vars['role'], ansible_api.get_detail_playbook_stats()) + + def drive_bench(self, installer_type, pwd, benchmark, roles, benchmark_fname, + benchmark_detail=None, pip_dict=None, proxy_info=None): + roles = sorted(roles) + pip_dict = sorted(pip_dict) + var_json = self.get_common_var_json(installer_type, pwd, benchmark_fname, + benchmark_detail, pip_dict, proxy_info) + result = map(lambda role: self.run_ansible_playbook + (benchmark, self.merge_two_dicts(var_json, + self.get_special_var_json(role, roles, + benchmark_detail, + pip_dict))), roles) + return reduce(self._merge_ansible_result, result) + + def get_ansible_result(self, role, stats): + result = reduce(add, map(lambda x: x[1]['failures'] + x[1]['unreachable'], stats)) + return {'result': result, + 'detail': {role: stats}} + + def _merge_ansible_result(self, result_1, result_2): + return {'result': result_1['result'] + result_2['result'], + 'detail': self.merge_two_dicts(result_1['detail'], result_2['detail'])} diff --git a/legacy/utils/env_setup.py b/legacy/utils/env_setup.py new file mode 100644 index 00000000..7bbedfcf --- /dev/null +++ b/legacy/utils/env_setup.py @@ -0,0 +1,214 @@ 
+############################################################################## +# Copyright (c) 2016 Dell Inc, ZTE and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +import os +import random +import socket +import sys +import time +from collections import defaultdict +from os.path import expanduser +import paramiko +import yaml +import logger_utils + +logger = logger_utils.QtipLogger('env_setup').get + + +class Env_setup: + + roles_ip_list = [] # ROLE and its corresponding IP address list + ip_pw_list = [] # IP and password, this will be used to ssh + roles_dict = defaultdict(list) + ip_pw_dict = defaultdict(list) + ip_pip_list = [] + vm_parameters = defaultdict(list) + benchmark_details = defaultdict() + benchmark = '' + + def __init__(self): + print '\nParsing class initiated\n' + self.roles_ip_list[:] = [] + self.ip_pw_list[:] = [] + self.roles_dict.clear() + self.ip_pw_dict.clear() + self.ip_pip_list[:] = [] + self.proxy_info = {} + self.vm_parameters.clear() + self.benchmark_details.clear() + self.benchmark = '' + + @staticmethod + def write_to_file(role): + f_name_2 = open('./config/hosts', 'w') + print role.items() + for k in role: + f_name_2.write('[' + k + ']\n') + num = len(role[k]) + for x in range(num): + f_name_2.write(role[k][x] + '\n') + f_name_2.close() + + @staticmethod + def ssh_test(hosts): + for ip, pw in hosts: + logger.info('Beginning SSH Test: %s \n' % ip) + os.system('ssh-keyscan %s >> /root/.ssh/known_hosts' % ip) + time.sleep(2) + + ssh_cmd = './scripts/qtip_creds.sh %s' % ip + logger.info("run command: %s " % ssh_cmd) + os.system(ssh_cmd) + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(ip, key_filename='./config/QtipKey') + + for attempts in range(100): + try: + stdin, stdout, stderr = ssh.exec_command('uname') + if not stderr.readlines(): + logger.info('SSH successful') + break + except socket.error: + logger.error('SSH is still unavailable, retry!') + time.sleep(2) + if attempts == 99: + logger.error("Try 99 times, SSH failed: %s" % ip) + + @staticmethod + def ping_test(lister, attempts=30): + for k, v in lister.iteritems(): + time.sleep(10) + for val in v: + ipvar = val + ping_cmd = 'ping -D -c1 {0}'.format(ipvar) + for i in range(attempts): + if os.system(ping_cmd) != 0: + print '\nWaiting for machine\n' + time.sleep(10) + else: + break + print ('\n\n %s is UP \n\n ' % ipvar) + + @staticmethod + def fetch_compute_ips(): + logger.info("Fetch compute ips through installer") + ips = [] + + installer_type = str(os.environ['INSTALLER_TYPE'].lower()) + installer_ip = str(os.environ['INSTALLER_IP']) + if installer_type not in ["fuel", "compass"]: + raise RuntimeError("%s is not supported" % installer_type) + if not installer_ip: + raise RuntimeError("undefine environment variable INSTALLER_IP") + + cmd = "bash ./scripts/fetch_compute_ips.sh -i %s -a %s" % \ + (installer_type, installer_ip) + logger.info(cmd) + os.system(cmd) + with open(expanduser('~') + "/qtip/ips.log", "r") as file: + data = file.read() + if data: + ips.extend(data.rstrip('\n').split('\n')) + logger.info("All compute ips: %s" % ips) + return ips + + def check_machine_ips(self, host_tag): + logger.info("Check machine ips") + ips = self.fetch_compute_ips() 
+ ips_num = len(ips) + num = len(host_tag) + if num > ips_num: + err = "host num %s > compute ips num %s" % (num, ips_num) + raise RuntimeError(err) + + for x in range(num): + hostlabel = 'machine_' + str(x + 1) + if host_tag[hostlabel]['ip']: + if host_tag[hostlabel]['ip'] in ips: + info = "%s's ip %s is defined by test case yaml file" % \ + (hostlabel, host_tag[hostlabel]['ip']) + logger.info(info) + else: + err = "%s is not in %s" % (host_tag[hostlabel]['ip'], ips) + raise RuntimeError(err) + else: + host_tag[hostlabel]['ip'] = random.choice(ips) + info = "assign ip %s to %s" % (host_tag[hostlabel]['ip'], hostlabel) + ips.remove(host_tag[hostlabel]['ip']) + + def get_host_machine_info(self, host_tag): + num = len(host_tag) + offset = len(self.roles_ip_list) + self.check_machine_ips(host_tag) + for x in range(num): + hostlabel = 'machine_' + str(x + 1) + self.roles_ip_list.insert( + offset, (host_tag[hostlabel]['role'], host_tag[hostlabel]['ip'])) + self.ip_pw_list.insert( + offset, (host_tag[hostlabel]['ip'], host_tag[hostlabel]['pw'])) + + def get_virtual_machine_info(self, virtual_tag): + + num = len(virtual_tag) + for x in range(num): + host_label = 'virtualmachine_' + str(x + 1) + for k, v in virtual_tag[host_label].iteritems(): + self.vm_parameters[k].append(v) + + def get_bench_mark_details(self, detail_dic): + + print detail_dic + for k, v in detail_dic.items(): + self.benchmark_details[k] = v + + def parse(self, config_file_path): + try: + f_name = open(config_file_path, 'r+') + doc = yaml.safe_load(f_name) + f_name.close() + if doc['Scenario']['benchmark']: + self.benchmark = doc['Scenario']['benchmark'] + if doc['Context']['Virtual_Machines']: + self.get_virtual_machine_info(doc['Context']['Virtual_Machines']) + if doc['Context']['Host_Machines']: + self.get_host_machine_info(doc['Context']['Host_Machines']) + if doc.get('Scenario', {}).get('benchmark_details', {}): + self.get_bench_mark_details(doc.get('Scenario', {}).get('benchmark_details', {})) + if 'Proxy_Environment' in doc['Context'].keys(): + self.proxy_info['http_proxy'] = doc['Context']['Proxy_Environment']['http_proxy'] + self.proxy_info['https_proxy'] = doc['Context']['Proxy_Environment']['https_proxy'] + self.proxy_info['no_proxy'] = doc['Context']['Proxy_Environment']['no_proxy'] + for k, v in self.roles_ip_list: + self.roles_dict[k].append(v) + for k, v in self.ip_pw_list: + self.ip_pw_dict[k].append(v) + return ( + self.benchmark, + self.vm_parameters, + self.benchmark_details.items(), + self.proxy_info) + except KeyboardInterrupt: + print 'ConfigFile Closed: exiting!' 
+ sys.exit(0) + + def update_ansible(self): + self.write_to_file(self.roles_dict) + + def call_ping_test(self): + self.ping_test(self.roles_dict) + + def call_ssh_test(self): + self.ssh_test(self.ip_pw_list) + + def cleanup_authorized_keys(self): + for ip, pw in self.ip_pw_list: + cmd = './scripts/cleanup_creds.sh %s' % ip + logger.info("cleanup authorized_keys: %s " % cmd) + os.system(cmd) diff --git a/legacy/utils/report/__init__.py b/legacy/utils/report/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/legacy/utils/report/__init__.py diff --git a/legacy/utils/report/get_indices.py b/legacy/utils/report/get_indices.py new file mode 100644 index 00000000..91219c0b --- /dev/null +++ b/legacy/utils/report/get_indices.py @@ -0,0 +1,8 @@ +import json + + +def get_index(suite): + with open('../../results/' + suite + '.json') as result_file: + result_djson = json.load(result_file) + index = result_djson['index'] + return index diff --git a/legacy/utils/report/get_results.py b/legacy/utils/report/get_results.py new file mode 100644 index 00000000..23fd5383 --- /dev/null +++ b/legacy/utils/report/get_results.py @@ -0,0 +1,50 @@ +import os +import json + + +def report_concat(targ_dir, testcase): + machine_temp = [] + machines = [] + + for file in os.listdir(targ_dir): + if file.endswith(".json"): + machine_temp.append(file) + + l = len(machine_temp) + + for x in range(0, l): + file_t = machine_temp[x] + with open(targ_dir + file_t) as result_file: + result_djson = json.load(result_file) + if result_djson['1 Testcase Name'] == str(testcase): + machines.append(result_djson) + return machines + + +def space_count(l): + spc = '' + for x in range(l): + spc = spc + ' ' + return spc + + +def custom_dict(list1, list2, k): + string_1 = '' + for num_1 in range(0, len(list1)): + string_1 = string_1 + space_count(k) + str(list1[num_1][0]) + "=" + str(list2[num_1]) + "\n" + return string_1 + + +def generate_result(dict_a, k): + list_1 = [] + list_2 = [] + count = 0 + for i, j in sorted(dict_a.iteritems()): + list_1.append([]) + list_1[count].append(i) + if (str(type(dict_a.get(i)))) == "<type 'dict'>": + list_2.append(str("\n" + generate_result(dict_a.get(i), int(k + 1)))) + else: + list_2.append(dict_a.get(i)) + count = count + 1 + return custom_dict(list_1, list_2, k) diff --git a/legacy/utils/report/qtip_graph.py b/legacy/utils/report/qtip_graph.py new file mode 100644 index 00000000..acbda40c --- /dev/null +++ b/legacy/utils/report/qtip_graph.py @@ -0,0 +1,30 @@ +import matplotlib +import matplotlib.pyplot as plt +import numpy as np + +matplotlib.use('Agg') + + +def plot_indices(a, b, c): + N = 3 + ind = np.arange(N) + y_axis = (a, b, c) + width = 0.35 + f = plt.figure() + ax = f.gca() + ax.set_autoscale_on(True) + my_bars = ax.bar(ind, y_axis, width, color='b') + ax.set_ylabel('Index Score*') + ax.set_xlabel('Suite') + ax.set_title(' QTIP benchmark scores') + ax.axis('on') + my_bars = ax.bar(ind, y_axis, width) + ax.set_xticks(ind + width / 2) + ax.set_xticklabels(['Compute', 'Storage', 'Network']) + ax.axis([0, 3, 0, 1.25]) + f.text(0.7, 0.01, '* With Comparison to Refernece POD', fontsize=9) + + for rect in my_bars: + height = rect.get_height() + ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, height, ha='center', va='bottom') + f.savefig('qtip_graph.jpeg') diff --git a/legacy/utils/report/qtip_report.py b/legacy/utils/report/qtip_report.py new file mode 100644 index 00000000..6809e892 --- /dev/null +++ b/legacy/utils/report/qtip_report.py @@ -0,0 +1,109 @@ +from 
reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image +from reportlab.lib.styles import getSampleStyleSheet +from reportlab.lib.units import inch +from reportlab.lib.pagesizes import letter +import qtip_graph as graph +import get_indices as results +from get_results import report_concat +from get_results import generate_result + + +def dump_result(Stor, directory, testcase): + try: + lower_s = testcase.lower() + Stor.append(Paragraph(testcase, Style['h3'])) + l1 = report_concat(directory, lower_s) + l = 1 + for a in l1: + Stor.append(Paragraph(testcase + " result_" + str(l), Style['h5'])) + raw_string = generate_result(a, 0) + replaced_string = raw_string.replace('\n', '<br/> ').replace(' ', ' ') + Stor.append(Paragraph(replaced_string, Style['BodyText'])) + l = l + 1 + except OSError: + print "Results for {0} not found".format(testcase) + + +doc = SimpleDocTemplate("../../results/QTIP_results.pdf", pagesize=letter, + rightMargin=72, leftMargin=72, + topMargin=72, bottomMargin=18) +Stor = [] +Style = getSampleStyleSheet() +Title = "QTIP Benchmark Suite" +Stor.append(Paragraph(Title, Style['Title'])) +H1 = "Results" +Stor.append(Spacer(0, 36)) +Stor.append(Paragraph(H1, Style['h2'])) +compute = 0 +storage = 0 +network = 0 +try: + compute = results.get_index('compute_result') +except IOError: + pass + +try: + storage = results.get_index('storage_result') +except IOError: + pass +try: + network = results.get_index('network_result') +except IOError: + pass + +Stor.append(Paragraph("Compute Suite: %f" % compute, Style['h5'])) +Stor.append(Paragraph("Storage Suite: %f" % storage, Style['h5'])) +Stor.append(Paragraph("Network Suite: %f" % network, Style['h5'])) +graph.plot_indices(compute, storage, network) +qtip_graph = ('qtip_graph.jpeg') +im = Image(qtip_graph, 5 * inch, 4 * inch) +Stor.append(im) +Stor.append(Spacer(0, 12)) +Stor.append(Paragraph("Reference POD", Style['h5'])) +ptext = "The Dell OPNFV Lab POD3 has been taken as the reference POD against which the reference results have been collected. The POD consists of 6 identical servers. 
The details of such a server are:" +Stor.append(Paragraph(ptext, Style['Normal'])) +ptext = "<bullet>•</bullet>Server Type: Dell PowerEdge R630 Server" +Stor.append(Paragraph(ptext, Style['Bullet'])) +ptext = "<bullet>•</bullet>CPU: Intel Xeon E5-2698 @ 2300 MHz" +Stor.append(Paragraph(ptext, Style["Bullet"])) +ptext = "<bullet>•</bullet>RAM: 128GB" +Stor.append(Paragraph(ptext, Style["Bullet"])) +ptext = "<bullet>•</bullet>Storage SSD: 420GB" +Stor.append(Paragraph(ptext, Style["Bullet"])) +ptext = "<bullet>•</bullet>Network Card: Intel 2P X520/2P I350 rNDC" +Stor.append(Paragraph(ptext, Style["Bullet"])) +ptext = "Servers interconnected through a DELL S4810 switch using a 10Gbps physical link" +Stor.append(Paragraph(ptext, Style["Bullet"])) +Stor.append(Spacer(0, 12)) +ptext = "For Further Details of the Reference POD hardware, please visit: https://wiki.opnfv.org/reference_pod_hardware_details" +Stor.append(Paragraph(ptext, Style['Normal'])) +Stor.append(Spacer(0, 12)) +ptext = "For Details of the Reference POD Results, please visit: https://wiki.opnfv.org/reference_pod_qtip_results" +Stor.append(Spacer(0, 12)) +Stor.append(Paragraph(ptext, Style['Normal'])) +Stor.append(Paragraph("RAW Results", Style['h1'])) +Stor.append(Paragraph("Compute Results", Style['h2'])) + +dump_result(Stor, "../../results/dhrystone/", "Dhrystone_bm") +dump_result(Stor, "../../results/dhrystone/", "Dhrystone_vm") + +dump_result(Stor, "../../results/whetstone/", "Whetstone_bm") +dump_result(Stor, "../../results/whetstone/", "Whetstone_vm") + +dump_result(Stor, "../../results/ramspeed/", "Ramspeed_bm") +dump_result(Stor, "../../results/ramspeed/", "Ramspeed_vm") + +dump_result(Stor, "../../results/ssl/", "SSL_bm") +dump_result(Stor, "../../results/ssl/", "SSL_vm") + +Stor.append(Paragraph("Network Results", Style['h2'])) +dump_result(Stor, "../../results/iperf/", "IPERF_bm") +dump_result(Stor, "../../results/iperf/", "IPERF_vm") +dump_result(Stor, "../../results/iperf/", "IPERF_vm_2") + +Stor.append(Paragraph("Storage Results", Style['h2'])) +dump_result(Stor, "../../results/fio/", "fio_bm") +dump_result(Stor, "../../results/fio/", "fio_vm") + + +doc.build(Stor) diff --git a/legacy/utils/spawn_vm.py b/legacy/utils/spawn_vm.py new file mode 100644 index 00000000..f38c9a3a --- /dev/null +++ b/legacy/utils/spawn_vm.py @@ -0,0 +1,206 @@ +##############################################################################
+# Copyright (c) 2016 Dell Inc, ZTE and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import sys
+import yaml
+import heatclient.client
+import keystoneclient
+import time
+from env_setup import Env_setup
+from create_zones import AvailabilityZone
+import logger_utils
+
+logger = logger_utils.QtipLogger('spawn_vm').get
+
+
+class SpawnVM(Env_setup):
+
+ def __init__(self, vm_info):
+ logger.info('vm_info: %s' % vm_info)
+ vm_role_ip_dict = vm_info.copy()
+ self._keystone_client = None
+ self._heat_client = None
+ self._glance_client = None
+ self._nova_client = None
+ self.azone = AvailabilityZone()
+ # TODO: it should clean up aggregates and stack after test case finished.
+ self.azone.clean_all_aggregates()
+ self.azone.create_aggs(vm_info['availability_zone'])
+ self.heat_template = self.generate_heat_template(vm_info)
+ self.create_stack(vm_role_ip_dict)
+
+ @staticmethod
+ def get_public_network():
+
+ """
+ TODO: GET THE NAMES OF THE PUBLIC NETWORKS for OTHER PROJECTS
+ """
+ installer = os.environ['INSTALLER_TYPE']
+
+ if installer.lower() == 'fuel':
+ return 'admin_floating_net'
+ if installer.lower() == 'apex':
+ return 'external'
+ if installer.lower() == 'compass':
+ return 'ext-net'
+ if installer.lower() == 'joid':
+ return 'ext-net'
+
+ def generate_heat_template(self, vm_params):
+ logger.info('Generating Heat Template')
+ heat_dict = {}
+ try:
+ with open('./config/SampleHeat.yaml', 'r+') as H_temp:
+ heat_dict = yaml.safe_load(H_temp)
+ except yaml.YAMLError as exc:
+ if hasattr(exc, 'problem_mark'):
+ mark = exc.problem_mark
+ logger.error(
+ 'Error in qtip/config/SampleHeat.yaml at: (%s,%s)' % (mark.line + 1,
+ mark.column + 1))
+ logger.error('EXITING PROGRAM. Correct File and restart')
+ sys.exit(1)
+
+ fopen = open('./config/QtipKey.pub', 'r')
+ fopenstr = fopen.read()
+ fopenstr = fopenstr.rstrip()
+ scriptcmd = '#!/bin/bash \n echo {0} >> foo.txt \n echo {1} >> /root/.ssh/authorized_keys'.format(
+ fopenstr, fopenstr)
+
+ netName = self.get_public_network()
+ heat_dict['heat_template_version'] = '2015-04-30'
+
+ heat_dict['parameters']['public_network'] = {
+ 'type': 'string',
+ 'default': netName
+ }
+
+ for x in range(1, len(vm_params['availability_zone']) + 1):
+ avail_zone = vm_params['availability_zone'][x - 1]
+
+ heat_dict['parameters']['availability_zone_' + str(x)] = \
+ {'description': 'Availability Zone of the instance',
+ 'default': avail_zone,
+ 'type': 'string'}
+
+ heat_dict['resources']['public_port_' + str(x)] = \
+ {'type': 'OS::Neutron::Port',
+ 'properties': {'network': {'get_resource': 'network'},
+ 'security_groups': [{'get_resource': 'security_group'}],
+ 'fixed_ips': [{'subnet_id': {'get_resource': 'subnet'}}]}}
+
+ heat_dict['resources']['floating_ip_' + str(x)] = {
+ 'type': 'OS::Neutron::FloatingIP',
+ 'properties': {'floating_network': {'get_param': 'external_net_name'}}}
+
+ heat_dict['resources']['floating_ip_assoc_' + str(x)] = {
+ 'type': 'OS::Neutron::FloatingIPAssociation',
+ 'properties': {
+ 'floatingip_id': {'get_resource': 'floating_ip_' + str(x)},
+ 'port_id': {'get_resource': 'public_port_' + str(x)}}}
+
+ heat_dict['resources']['my_instance_' + str(x)] = \
+ {'type': 'OS::Nova::Server',
+ 'properties': {'image': {'get_param': 'image'},
+ 'networks':
+ [{'port': {'get_resource': 'public_port_' + str(x)}}],
+ 'flavor': {'get_resource': 'flavor'},
+ 'availability_zone': avail_zone,
+ 'security_groups': [{'get_resource': 'security_group'}],
+ 'name': 'instance' + str(x),
+ 'user_data_format': 'RAW',
+ 'user_data': scriptcmd}}
+
+ heat_dict['outputs']['instance_PIP_' + str(x)] = {
+ 'description': 'IP address of the instance',
+ 'value': {'get_attr': ['my_instance_' + str(x), 'first_address']}}
+
+ heat_dict['outputs']['instance_ip_' + str(x)] = {
+ 'description': 'IP address of the instance',
+ 'value': {'get_attr': ['floating_ip_' + str(x), 'floating_ip_address']}}
+
+ heat_dict['outputs']['availability_instance_' + str(x)] = {
+ 'description': 'Availability Zone of the Instance',
+ 'value': {'get_param': 'availability_zone_' + str(x)}}
+
+ del heat_dict['outputs']['description']
+ logger.info(heat_dict)
+
+ return heat_dict
+
+ def _get_keystone_client(self):
+ """returns a keystone client instance"""
+
+ if self._keystone_client is None:
+ self._keystone_client = keystoneclient.v2_0.client.Client(
+ auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'))
+ return self._keystone_client
+
+ def _get_heat_client(self):
+ """returns a heat client instance"""
+ if self._heat_client is None:
+ keystone = self._get_keystone_client()
+ heat_endpoint = keystone.service_catalog.url_for(
+ service_type='orchestration')
+ self._heat_client = heatclient.client.Client(
+ '1', endpoint=heat_endpoint, token=keystone.auth_token)
+ return self._heat_client
+
+ def create_stack(self, vm_role_ip_dict):
+ stackname = 'QTIP'
+ heat = self._get_heat_client()
+
+ self.delete_stack(stackname)
+
+ logger.info('Start to create stack %s' % stackname)
+ heat.stacks.create(stack_name=stackname, template=self.heat_template)
+
+ stack_status = "IN_PROGRESS"
+ while stack_status != 'COMPLETE':
+ if stack_status == 'IN_PROGRESS':
+ logger.debug('Create in Progress')
+ if stack_status == 'CREATE_FAILED':
+ raise RuntimeError("Stack %s created failed!" % stackname)
+ stack_status = heat.stacks.get(stackname).status
+ time.sleep(15)
+ logger.info('Stack %s Created Complete!' % stackname)
+
+ stack_outputs = heat.stacks.get(stackname).outputs
+
+ for vm in range(len(vm_role_ip_dict['OS_image'])):
+ for i in stack_outputs:
+ instanceKey = "instance_ip_" + str(vm + 1)
+ privateIPkey = 'instance_PIP_' + str(vm + 1)
+ if i['output_key'] == instanceKey:
+ Env_setup.roles_dict[vm_role_ip_dict['role'][vm]] \
+ .append(str(i['output_value']))
+ Env_setup.ip_pw_list.append((str(i['output_value']), ''))
+
+ if i['output_key'] == privateIPkey:
+ Env_setup.ip_pw_dict[vm_role_ip_dict['role'][vm]] = str(i['output_value'])
+
+ logger.info('Getting Public IP(s): %s' % Env_setup.ip_pw_list)
+
+ def delete_stack(self, stack_name):
+ heat = self._get_heat_client()
+
+ stacks = heat.stacks.list()
+ exists = map(lambda x: x.stack_name, stacks)
+ if stack_name in exists:
+ logger.info("Delete stack %s" % stack_name)
+ heat.stacks.delete(stack_name)
+ while stack_name in exists:
+ time.sleep(10)
+ stacks = heat.stacks.list()
+ exists = map(lambda x: x.stack_name, stacks)
+ logger.debug("exists_stacks: %s" % exists)
+ logger.info("%s doesn't exist" % stack_name)
diff --git a/legacy/utils/transform/__init__.py b/legacy/utils/transform/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/legacy/utils/transform/__init__.py diff --git a/legacy/utils/transform/dpi_transform.py b/legacy/utils/transform/dpi_transform.py new file mode 100644 index 00000000..ee29d8e2 --- /dev/null +++ b/legacy/utils/transform/dpi_transform.py @@ -0,0 +1,47 @@ +import os +import pickle +import datetime + +sum_dpi_pps = float(0) +sum_dpi_bps = float(0) + +for x in range(1, 11): + dpi_result_pps = float( + os.popen( + "cat $HOME/qtip_result/dpi_dump.txt | grep 'nDPI throughput:' | awk 'NR=='" + + str(x) + + " | awk '{print $3}'").read().lstrip()) + dpi_result_bps = float( + os.popen( + "cat $HOME/qtip_result/dpi_dump.txt | grep 'nDPI throughput:' | awk 'NR=='" + + str(x) + + " | awk '{print $7}'").read().rstrip()) + + if (dpi_result_pps > 100): + dpi_result_pps = dpi_result_pps / 1000 + + if (dpi_result_bps > 100): + dpi_result_bps = dpi_result_bps / 1000 + + sum_dpi_pps += dpi_result_pps + sum_dpi_bps += dpi_result_bps + +dpi_result_pps = sum_dpi_pps / 10 +dpi_result_bps = sum_dpi_bps / 10 + +host = os.popen("hostname").read().rstrip() +log_time_stamp = str(datetime.datetime.utcnow().isoformat()) + +os.popen( + "cat $HOME/qtip_result/dpi_dump.txt > $HOME/qtip_result/" + + host + + "-" + + log_time_stamp + + ".log") + +home_dir = str(os.popen("echo $HOME").read().rstrip()) +host = os.popen("echo $HOSTNAME") +result = {'pps': round(dpi_result_pps, 3), + 'bps': round(dpi_result_bps, 3)} +with open('./result_temp', 'w+') as result_file: + pickle.dump(result, result_file) diff --git a/legacy/utils/transform/final_report.py b/legacy/utils/transform/final_report.py new file mode 100644 index 00000000..274742d4 --- /dev/null +++ b/legacy/utils/transform/final_report.py @@ -0,0 +1,24 @@ +import pickle +import json +import datetime +import os +import sys + +home_dir = str((os.popen("echo $HOME").read().rstrip())) + +with open('./sys_info_temp', 'r') as sys_info_f: + sys_info_dict = pickle.load(sys_info_f) +with open('./result_temp', 'r') as result_f: + result_dict = pickle.load(result_f) + +host_name = (os.popen("hostname").read().rstrip()) +benchmark_name = str(sys.argv[1]) +testcase_name = str(sys.argv[2]) +report_time_stamp = str(datetime.datetime.utcnow().isoformat()) +final_dict = {"name": testcase_name, + "time": report_time_stamp, + "system_information": sys_info_dict, + "details": result_dict} + +with open('./' + host_name + '-' + report_time_stamp + '.json', 'w+') as result_json: + json.dump(final_dict, result_json, indent=4, sort_keys=True) diff --git a/legacy/utils/transform/fio_transform.py b/legacy/utils/transform/fio_transform.py new file mode 100755 index 00000000..5ecac823 --- /dev/null +++ b/legacy/utils/transform/fio_transform.py @@ -0,0 +1,29 @@ +import json +import pickle +import os +import datetime + + +def get_fio_job_result(fio_job_data): + return {'read': {'io_bytes': fio_job_data["read"]["io_bytes"], + 'io_ps': fio_job_data["read"]["iops"], + 'io_runtime_millisec': fio_job_data["read"]["runtime"], + 'mean_io_latenchy_microsec': fio_job_data["read"]["lat"]["mean"]}, + 'write': {'io_bytes': fio_job_data["write"]["io_bytes"], + 'io_ps': fio_job_data["write"]["iops"], + 'io_runtime_millisec': fio_job_data["write"]["runtime"], + 'mean_io_latenchy_microsec': fio_job_data["write"]["lat"]["mean"]}} + + +with open("fio_result.json") as fio_raw: + fio_data = json.load(fio_raw) + +fio_result_dict = {} +for x, result in 
enumerate(map(get_fio_job_result, fio_data["jobs"])):
+ fio_result_dict['job_{0}'.format(x)] = result
+
+host_name = (os.popen("hostname").read().rstrip())
+report_time = str(datetime.datetime.utcnow().isoformat())
+os.system("mv fio_result.json " + str(host_name) + "-" + report_time + ".log")
+with open('./result_temp', 'w+') as out_fio_result:
+ pickle.dump(fio_result_dict, out_fio_result)
diff --git a/legacy/utils/transform/iperf_transform.py b/legacy/utils/transform/iperf_transform.py
new file mode 100644
index 00000000..b52e4634
--- /dev/null
+++ b/legacy/utils/transform/iperf_transform.py
@@ -0,0 +1,27 @@
+import json
+import datetime
+import pickle
+with open('iperf_raw.json', 'r') as ifile:
+ raw_iperf_data = json.loads(ifile.read().rstrip())
+
+bits_sent = raw_iperf_data['end']['sum_sent']['bits_per_second']
+bits_received = raw_iperf_data['end']['sum_received']['bits_per_second']
+total_byte_sent = raw_iperf_data['end']['sum_sent']['bytes']
+total_byte_received = raw_iperf_data['end']['sum_received']['bytes']
+cpu_host_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['host_total']
+cpu_remote_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['remote_total']
+
+time_stamp = str(datetime.datetime.utcnow().isoformat())
+
+result = {'version': raw_iperf_data['start']['version'],
+ 'bandwidth': {'sender_throughput': bits_sent,
+ 'received_throughput': bits_received},
+ 'cpu': {'cpu_host': cpu_host_total_percent,
+ 'cpu_remote': cpu_remote_total_percent}
+ }
+
+with open('iperf_raw-' + time_stamp + '.log', 'w+') as ofile:
+ ofile.write(json.dumps(raw_iperf_data))
+
+with open('./result_temp', 'w+') as result_file:
+ pickle.dump(result, result_file)
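Each transform script above leaves a ./result_temp pickle in the working directory; final_report.py (earlier in this diff) merges it with ./sys_info_temp into the per-host JSON report. A minimal sketch of reading it back, assuming the transform step has already run:

```python
# Minimal sketch: read back the ./result_temp pickle written by the transform
# scripts, as final_report.py in this commit does. Assumes the file exists in
# the current working directory.
import json
import pickle

with open('./result_temp', 'r') as result_f:  # text-mode 'r', matching the legacy Python 2 code
    result_dict = pickle.load(result_f)

print(json.dumps(result_dict, indent=4, sort_keys=True))
```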
diff --git a/legacy/utils/transform/ramspeed_transform.py b/legacy/utils/transform/ramspeed_transform.py new file mode 100644 index 00000000..960f84fc --- /dev/null +++ b/legacy/utils/transform/ramspeed_transform.py @@ -0,0 +1,41 @@ +import os +import pickle +import datetime + +intmem_copy = os.popen("cat Intmem | grep 'BatchRun Copy' | awk '{print $4}'").read().rstrip() +intmem_scale = os.popen("cat Intmem | grep 'BatchRun Scale' | awk '{print $4}'").read().rstrip() +intmem_add = os.popen("cat Intmem | grep 'BatchRun Add' | awk '{print $4}'").read().rstrip() +intmem_triad = os.popen("cat Intmem | grep 'BatchRun Triad' | awk '{print $4}'").read().rstrip() +intmem_average = os.popen("cat Intmem | grep 'BatchRun AVERAGE' | awk '{print $4}'").read().rstrip() + +print intmem_copy +print intmem_average + +floatmem_copy = os.popen("cat Floatmem | grep 'BatchRun Copy' | awk '{print $4}'").read().rstrip() +floatmem_scale = os.popen("cat Floatmem | grep 'BatchRun Scale' | awk '{print $4}'").read().rstrip() +floatmem_add = os.popen("cat Floatmem | grep 'BatchRun Add' | awk '{print $4}'").read().rstrip() +floatmem_triad = os.popen("cat Floatmem | grep 'BatchRun Triad' | awk '{print $4}'").read().rstrip() +floatmem_average = os.popen("cat Floatmem | grep 'BatchRun AVERAGE' | awk '{print $4}'").read().rstrip() + +print floatmem_copy +print floatmem_average + +hostname = os.popen("hostname").read().rstrip() +time_stamp = str(datetime.datetime.utcnow().isoformat()) + +os.system("mv Intmem " + hostname + "-" + time_stamp + ".log") +os.system("cp Floatmem >> " + hostname + "-" + time_stamp + ".log") + +result = {"int_bandwidth": {"copy": intmem_copy, + "add": intmem_add, + "scale": intmem_scale, + "triad": intmem_triad, + "average": intmem_average}, + "float_bandwidth": {"copy": floatmem_copy, + "add": floatmem_add, + "scale": floatmem_scale, + "triad": floatmem_triad, + "average": floatmem_average}} + +with open('./result_temp', 'w+') as result_file: + pickle.dump(result, result_file) diff --git a/legacy/utils/transform/ssl_transform.py b/legacy/utils/transform/ssl_transform.py new file mode 100644 index 00000000..de84d24b --- /dev/null +++ b/legacy/utils/transform/ssl_transform.py @@ -0,0 +1,54 @@ +import os +import pickle +import datetime + +openssl_version = os.popen("cat RSA_dump | head -1").read().rstrip() +rsa_512_sps = os.popen( + "cat RSA_dump | grep '512 bits ' | awk '{print $6}' ").read().rstrip() +rsa_512_vps = os.popen( + "cat RSA_dump | grep '512 bits ' | awk '{print $7}' ").read().rstrip() +rsa_1024_sps = os.popen( + "cat RSA_dump | grep '1024 bits ' | awk '{print $6}' ").read().rstrip() +rsa_1024_vps = os.popen( + "cat RSA_dump | grep '1024 bits ' | awk '{print $7}' ").read().rstrip() +rsa_2048_sps = os.popen( + "cat RSA_dump | grep '2048 bits ' | awk '{print $6}' ").read().rstrip() +rsa_2048_vps = os.popen( + "cat RSA_dump | grep '2048 bits ' | awk '{print $7}' ").read().rstrip() +rsa_4096_sps = os.popen( + "cat RSA_dump | grep '4096 bits ' | awk '{print $6}' ").read().rstrip() +rsa_4096_vps = os.popen( + "cat RSA_dump | grep '4096 bits ' | awk '{print $7}' ").read().rstrip() + +aes_16B = os.popen( + "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $2}' ").read().rstrip() +aes_64B = os.popen( + "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $3}' ").read().rstrip() +aes_256B = os.popen( + "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $4}' ").read().rstrip() +aes_1024B = os.popen( + "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $5}' 
").read().rstrip() +aes_8192B = os.popen( + "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $6}' ").read().rstrip() + +hostname = os.popen("hostname").read().rstrip() +time_stamp = str(datetime.datetime.utcnow().isoformat()) + +os.system("mv RSA_dump " + hostname + "-" + time_stamp + ".log") +os.system("cat AES-128-CBC_dump >> " + hostname + "-" + time_stamp + ".log") + +result = {"version": [openssl_version], + "rsa_sig": {"512_bits": rsa_512_sps, + "1024_bits": rsa_1024_sps, + "2048_bits": rsa_2048_sps, + "4096_bits": rsa_4096_sps, + "unit": "sig/sec"}, + "aes_128_cbc": {"16B_block": aes_16B, + "64B_block": aes_64B, + "256B_block": aes_256B, + "1024B_block": aes_1024B, + "8192B_block": aes_8192B, + "unit": "B/sec"}} + +with open('./result_temp', 'w+') as result_file: + pickle.dump(result, result_file) diff --git a/legacy/utils/transform/ubench_transform.py b/legacy/utils/transform/ubench_transform.py new file mode 100644 index 00000000..ab5fe171 --- /dev/null +++ b/legacy/utils/transform/ubench_transform.py @@ -0,0 +1,32 @@ +import os +import json +import pickle + +total_cpu = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'of tests' | awk '{print $1;}' | awk 'NR==1'").read().rstrip() + +cpu_1 = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'of tests' | awk '{print $6;}' | awk 'NR==1'").read().rstrip() + + +cpu_2 = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'of tests' | awk '{print $6;}' | awk 'NR==2'").read().rstrip() + + +index_1 = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'Index Score (Partial Only) ' | awk '{print $7;}' | awk 'NR==1'").read().rstrip() +index_2 = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'Index Score (Partial Only) ' | awk '{print $7;}' | awk 'NR==2'").read().rstrip() + + +result = {"n_cpu": total_cpu, + "single": {"n_para_test": cpu_1, + "score": index_1}, + "multi": {"n_para_test": cpu_2, + "score": index_2} + } + +with open('result_temp', 'w+') as result_file: + pickle.dump(result, result_file) +print json.dumps(result, indent=4, sort_keys=True) +# print result.items() |