Diffstat (limited to 'testsuites/posca/testcase_script')
-rw-r--r--  testsuites/posca/testcase_script/posca_factor_ping.py             |  11
-rw-r--r--  testsuites/posca/testcase_script/posca_factor_system_bandwidth.py | 101
2 files changed, 70 insertions, 42 deletions
diff --git a/testsuites/posca/testcase_script/posca_factor_ping.py b/testsuites/posca/testcase_script/posca_factor_ping.py
index ae30417b..3a2277cf 100644
--- a/testsuites/posca/testcase_script/posca_factor_ping.py
+++ b/testsuites/posca/testcase_script/posca_factor_ping.py
@@ -53,7 +53,7 @@ def env_pre(test_config):
     stack_prepare._prepare_env_daemon(test_yardstick)
     quota_prepare.quota_env_prepare()
     cmd = ('yardstick env prepare')
-    LOG.info("yardstick envrionment prepare!")
+    LOG.info("yardstick environment prepare!")
     if(test_config["contexts"]['yardstick_envpre']):
         yardstick_container = docker_env.yardstick_info['container']
         stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
@@ -69,10 +69,10 @@ def do_test():
     stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
     LOG.info(stdout)
     out_value = 0
-    loop_walue = 0
-    while loop_walue < 60:
+    loop_value = 0
+    while loop_value < 60:
         time.sleep(2)
-        loop_walue = loop_walue + 1
+        loop_value = loop_value + 1
         with open(out_file) as f:
             data = json.load(f)
             if data["status"] == 1:
@@ -119,9 +119,8 @@ def run(test_config):
         LOG.info("Create Dashboard data")
         DashBoard.posca_stress_ping(test_config["contexts"])
 
-    LOG.info("bottlenecks envrionment prepare!")
     env_pre(test_config)
-    LOG.info("yardstick envrionment prepare done!")
+    LOG.info("yardstick environment prepare done!")
 
     for value in test_num:
         result = []
diff --git a/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py b/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
index 01c5dab5..05ea61e2 100644
--- a/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
+++ b/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
@@ -14,10 +14,13 @@ This test is using yardstick as a tool to begin test.'''
 
 import os
 import time
+import uuid
+import json
 import utils.logger as log
-import utils.infra_setup.runner.yardstick as Runner
 from utils.parser import Parser as conf_parser
+import utils.env_prepare.stack_prepare as stack_prepare
 import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
+import utils.infra_setup.runner.docker_env as docker_env
 # --------------------------------------------------
 # logging configuration
 # --------------------------------------------------
@@ -37,40 +40,76 @@ testcase, file_format = os.path.splitext(testfile)
 
 
 def env_pre(con_dic):
-    Runner.Create_Incluxdb(con_dic['runner_config'])
+    LOG.info("yardstick environment prepare!")
+    stack_prepare._prepare_env_daemon(True)
 
 
 def config_to_result(test_config, test_result):
     testdata = {}
-    test_result["throughput"] = float(test_result["throughput"])
+    parser_result = test_result["benchmark"]["data"]
     test_result.update(test_config)
+    test_result.update(parser_result)
+    test_result["throughput"] = float(test_result["throughput"])
+    test_result["remote_cpu_util"] = float(test_result["remote_cpu_util"])
+    test_result["local_cpu_util"] = float(test_result["local_cpu_util"])
+    test_result["mean_latency"] = float(test_result["mean_latency"])
     testdata["data_body"] = test_result
     testdata["testcase"] = testcase
     return testdata
 
 
-def do_test(test_config, con_dic):
-    test_dict['args']['opts']['task-args'] = test_config
-    Task_id = Runner.Send_Data(test_dict, con_dic['runner_config'])
-    time.sleep(con_dic['test_config']['test_time'])
-    Data_Reply = Runner.Get_Reply(con_dic['runner_config'], Task_id)
-    try:
-        test_date =\
-            Data_Reply[con_dic['runner_config']['yardstick_testcase']][0]
-    except IndexError:
-        test_date = do_test(test_config, con_dic)
+def testcase_parser(out_file="yardstick.out", **parameter_info):
+    cmd = ('yardstick task start /home/opnfv/repos/yardstick/'
+           'samples/netperf_bottlenecks.yaml --output-file ' + out_file)
+    cmd = cmd + " --task-args " + '"' + str(parameter_info) + '"'
+    LOG.info("yardstick test cmd is: %s" % cmd)
+    return cmd
+
+
+def do_test(test_config, Use_Dashboard, context_conf):
+    yardstick_container = docker_env.yardstick_info['container']
+    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+    cmd = testcase_parser(out_file=out_file, **test_config)
+    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+    LOG.info(stdout)
+    loop_value = 0
+    while loop_value < 60:
+        time.sleep(2)
+        loop_value = loop_value + 1
+        with open(out_file) as f:
+            data = json.load(f)
+            if data["status"] == 1:
+                LOG.info("yardstick run success")
+                break
+            elif data["status"] == 2:
+                LOG.error("yardstick error exit")
+                exit()
 
-    save_data = config_to_result(test_config, test_date)
-    if con_dic['runner_config']['dashboard'] == 'y':
-        DashBoard.dashboard_send_data(con_dic['runner_config'], save_data)
+    save_data = config_to_result(test_config, data['result'][1])
+    if Use_Dashboard is True:
+        DashBoard.dashboard_send_data(context_conf, save_data)
 
     return save_data["data_body"]
 
 
-def run(con_dic):
+def run(test_config):
+    con_dic = test_config["load_manager"]
+    env_pre(None)
+    if test_config["contexts"]["yardstick_ip"] is None:
+        con_dic["contexts"]["yardstick_ip"] =\
+            conf_parser.ip_parser("yardstick_test_ip")
+
+    if "dashboard" in test_config["contexts"].keys():
+        if test_config["contexts"]["dashboard_ip"] is None:
+            test_config["contexts"]["dashboard_ip"] =\
+                conf_parser.ip_parser("dashboard")
+        LOG.info("Create Dashboard data")
+        Use_Dashboard = True
+        DashBoard.dashboard_system_bandwidth(test_config["contexts"])
+
     data = {}
-    rx_pkt_a = con_dic['test_config']['rx_pkt_sizes'].split(',')
-    tx_pkt_a = con_dic['test_config']['tx_pkt_sizes'].split(',')
+    rx_pkt_a = con_dic['scenarios']['rx_pkt_sizes'].split(',')
+    tx_pkt_a = con_dic['scenarios']['tx_pkt_sizes'].split(',')
     data["rx_pkt_sizes"] = rx_pkt_a
     data["tx_pkt_sizes"] = tx_pkt_a
     con_dic["result_file"] = os.path.dirname(
@@ -82,30 +121,20 @@ def run(con_dic):
     data_max = {}
     data_return["throughput"] = 1
 
-    if con_dic["runner_config"]["yardstick_test_ip"] is None:
-        con_dic["runner_config"]["yardstick_test_ip"] =\
-            conf_parser.ip_parser("yardstick_test_ip")
-
-    env_pre(con_dic)
-
-    if con_dic["runner_config"]["dashboard"] == 'y':
-        if con_dic["runner_config"]["dashboard_ip"] is None:
-            con_dic["runner_config"]["dashboard_ip"] =\
-                conf_parser.ip_parser("dashboard")
-        LOG.info("Create Dashboard data")
-        DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])
-
     for test_x in data["tx_pkt_sizes"]:
         data_max["throughput"] = 1
         bandwidth_tmp = 1
         for test_y in data["rx_pkt_sizes"]:
-            test_config = {
+            case_config = {
                 "tx_msg_size": float(test_x),
                 "rx_msg_size": float(test_y),
-                "test_time": con_dic['test_config']['test_time']
+                "test_time": con_dic['scenarios']['test_times'],
+                "pod_info": conf_parser.bottlenecks_config["pod_info"]
             }
-            data_reply = do_test(test_config, con_dic)
-            conf_parser.result_to_file(data_reply, con_dic["out_file"])
+            data_reply = do_test(case_config, Use_Dashboard,
+                                 test_config["contexts"])
+
+            conf_parser.result_to_file(data_reply, test_config["out_file"])
             bandwidth = data_reply["throughput"]
             if (data_max["throughput"] < bandwidth):
                 data_max = data_reply
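For reference, the reworked do_test() in both scripts follows the same pattern: run the yardstick task inside the yardstick container, then poll the JSON output file until its "status" field reports success (1) or failure (2). The standalone sketch below isolates that polling step under the same values used in the patch (60 retries, 2-second interval); the helper name wait_for_yardstick is illustrative and not part of the patched scripts.

# Minimal sketch of the output-polling loop introduced in do_test();
# wait_for_yardstick is a hypothetical helper name, not upstream code.
import json
import time


def wait_for_yardstick(out_file, retries=60, interval=2):
    """Poll the yardstick JSON output file until it reports a final status."""
    for _ in range(retries):
        time.sleep(interval)
        with open(out_file) as f:
            data = json.load(f)
        if data["status"] == 1:    # task finished successfully
            return data
        if data["status"] == 2:    # yardstick reported an error
            raise RuntimeError("yardstick error exit")
    raise TimeoutError("yardstick did not finish within %d s"
                       % (retries * interval))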