summaryrefslogtreecommitdiffstats
path: root/testsuites/posca/testcase_script
diff options
context:
space:
mode:
Diffstat (limited to 'testsuites/posca/testcase_script')
-rw-r--r--testsuites/posca/testcase_script/posca_factor_ping.py11
-rw-r--r--testsuites/posca/testcase_script/posca_factor_system_bandwidth.py101
-rw-r--r--testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py125
-rw-r--r--testsuites/posca/testcase_script/posca_factor_vnf_scale_up.py132
4 files changed, 327 insertions, 42 deletions
diff --git a/testsuites/posca/testcase_script/posca_factor_ping.py b/testsuites/posca/testcase_script/posca_factor_ping.py
index ae30417b..3a2277cf 100644
--- a/testsuites/posca/testcase_script/posca_factor_ping.py
+++ b/testsuites/posca/testcase_script/posca_factor_ping.py
@@ -53,7 +53,7 @@ def env_pre(test_config):
stack_prepare._prepare_env_daemon(test_yardstick)
quota_prepare.quota_env_prepare()
cmd = ('yardstick env prepare')
- LOG.info("yardstick envrionment prepare!")
+ LOG.info("yardstick environment prepare!")
if(test_config["contexts"]['yardstick_envpre']):
yardstick_container = docker_env.yardstick_info['container']
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
@@ -69,10 +69,10 @@ def do_test():
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
LOG.info(stdout)
out_value = 0
- loop_walue = 0
- while loop_walue < 60:
+ loop_value = 0
+ while loop_value < 60:
time.sleep(2)
- loop_walue = loop_walue + 1
+ loop_value = loop_value + 1
with open(out_file) as f:
data = json.load(f)
if data["status"] == 1:
@@ -119,9 +119,8 @@ def run(test_config):
LOG.info("Create Dashboard data")
DashBoard.posca_stress_ping(test_config["contexts"])
- LOG.info("bottlenecks envrionment prepare!")
env_pre(test_config)
- LOG.info("yardstick envrionment prepare done!")
+ LOG.info("yardstick environment prepare done!")
for value in test_num:
result = []
diff --git a/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py b/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
index 01c5dab5..05ea61e2 100644
--- a/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
+++ b/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
@@ -14,10 +14,13 @@ This test is using yardstick as a tool to begin test.'''
import os
import time
+import uuid
+import json
import utils.logger as log
-import utils.infra_setup.runner.yardstick as Runner
from utils.parser import Parser as conf_parser
+import utils.env_prepare.stack_prepare as stack_prepare
import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
+import utils.infra_setup.runner.docker_env as docker_env
# --------------------------------------------------
# logging configuration
# --------------------------------------------------
@@ -37,40 +40,76 @@ testcase, file_format = os.path.splitext(testfile)
def env_pre(con_dic):
- Runner.Create_Incluxdb(con_dic['runner_config'])
+ LOG.info("yardstick environment prepare!")
+ stack_prepare._prepare_env_daemon(True)
def config_to_result(test_config, test_result):
testdata = {}
- test_result["throughput"] = float(test_result["throughput"])
+ parser_result = test_result["benchmark"]["data"]
test_result.update(test_config)
+ test_result.update(parser_result)
+ test_result["throughput"] = float(test_result["throughput"])
+ test_result["remote_cpu_util"] = float(test_result["remote_cpu_util"])
+ test_result["local_cpu_util"] = float(test_result["local_cpu_util"])
+ test_result["mean_latency"] = float(test_result["mean_latency"])
testdata["data_body"] = test_result
testdata["testcase"] = testcase
return testdata
-def do_test(test_config, con_dic):
- test_dict['args']['opts']['task-args'] = test_config
- Task_id = Runner.Send_Data(test_dict, con_dic['runner_config'])
- time.sleep(con_dic['test_config']['test_time'])
- Data_Reply = Runner.Get_Reply(con_dic['runner_config'], Task_id)
- try:
- test_date =\
- Data_Reply[con_dic['runner_config']['yardstick_testcase']][0]
- except IndexError:
- test_date = do_test(test_config, con_dic)
+def testcase_parser(out_file="yardstick.out", **parameter_info):
+ cmd = ('yardstick task start /home/opnfv/repos/yardstick/'
+ 'samples/netperf_bottlenecks.yaml --output-file ' + out_file)
+ cmd = cmd + " --task-args " + '"' + str(parameter_info) + '"'
+ LOG.info("yardstick test cmd is: %s" % cmd)
+ return cmd
+
+
+def do_test(test_config, Use_Dashboard, context_conf):
+ yardstick_container = docker_env.yardstick_info['container']
+ out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+ cmd = testcase_parser(out_file=out_file, **test_config)
+ stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+ LOG.info(stdout)
+ loop_value = 0
+ while loop_value < 60:
+ time.sleep(2)
+ loop_value = loop_value + 1
+ with open(out_file) as f:
+ data = json.load(f)
+ if data["status"] == 1:
+ LOG.info("yardstick run success")
+ break
+ elif data["status"] == 2:
+ LOG.error("yardstick error exit")
+ exit()
- save_data = config_to_result(test_config, test_date)
- if con_dic['runner_config']['dashboard'] == 'y':
- DashBoard.dashboard_send_data(con_dic['runner_config'], save_data)
+ save_data = config_to_result(test_config, data['result'][1])
+ if Use_Dashboard is True:
+ DashBoard.dashboard_send_data(context_conf, save_data)
return save_data["data_body"]
-def run(con_dic):
+def run(test_config):
+ con_dic = test_config["load_manager"]
+ env_pre(None)
+ if test_config["contexts"]["yardstick_ip"] is None:
+ con_dic["contexts"]["yardstick_ip"] =\
+ conf_parser.ip_parser("yardstick_test_ip")
+
+ if "dashboard" in test_config["contexts"].keys():
+ if test_config["contexts"]["dashboard_ip"] is None:
+ test_config["contexts"]["dashboard_ip"] =\
+ conf_parser.ip_parser("dashboard")
+ LOG.info("Create Dashboard data")
+ Use_Dashboard = True
+ DashBoard.dashboard_system_bandwidth(test_config["contexts"])
+
data = {}
- rx_pkt_a = con_dic['test_config']['rx_pkt_sizes'].split(',')
- tx_pkt_a = con_dic['test_config']['tx_pkt_sizes'].split(',')
+ rx_pkt_a = con_dic['scenarios']['rx_pkt_sizes'].split(',')
+ tx_pkt_a = con_dic['scenarios']['tx_pkt_sizes'].split(',')
data["rx_pkt_sizes"] = rx_pkt_a
data["tx_pkt_sizes"] = tx_pkt_a
con_dic["result_file"] = os.path.dirname(
@@ -82,30 +121,20 @@ def run(con_dic):
data_max = {}
data_return["throughput"] = 1
- if con_dic["runner_config"]["yardstick_test_ip"] is None:
- con_dic["runner_config"]["yardstick_test_ip"] =\
- conf_parser.ip_parser("yardstick_test_ip")
-
- env_pre(con_dic)
-
- if con_dic["runner_config"]["dashboard"] == 'y':
- if con_dic["runner_config"]["dashboard_ip"] is None:
- con_dic["runner_config"]["dashboard_ip"] =\
- conf_parser.ip_parser("dashboard")
- LOG.info("Create Dashboard data")
- DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])
-
for test_x in data["tx_pkt_sizes"]:
data_max["throughput"] = 1
bandwidth_tmp = 1
for test_y in data["rx_pkt_sizes"]:
- test_config = {
+ case_config = {
"tx_msg_size": float(test_x),
"rx_msg_size": float(test_y),
- "test_time": con_dic['test_config']['test_time']
+ "test_time": con_dic['scenarios']['test_times'],
+ "pod_info": conf_parser.bottlenecks_config["pod_info"]
}
- data_reply = do_test(test_config, con_dic)
- conf_parser.result_to_file(data_reply, con_dic["out_file"])
+ data_reply = do_test(case_config, Use_Dashboard,
+ test_config["contexts"])
+
+ conf_parser.result_to_file(data_reply, test_config["out_file"])
bandwidth = data_reply["throughput"]
if (data_max["throughput"] < bandwidth):
data_max = data_reply
diff --git a/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py b/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py
new file mode 100644
index 00000000..2241d02f
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+"""This file realizes the VNF scale-out test.
+It contains two parts: the first part runs the test script,
+and the second is the algorithm that judges where the bottleneck lies.
+This test uses yardstick as the tool to run the test."""
+
+import os
+import time
+import utils.logger as log
+import utils.infra_setup.runner.yardstick as Runner
+from utils.parser import Parser as conf_parser
+import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+
+
+def env_pre(con_dic):
+ Runner.Create_Incluxdb(con_dic['runner_config'])
+
+
+def config_to_result(test_config, test_result):
+ testdata = {}
+ test_result["throughput"] = float(test_result["throughput"])
+ test_result.update(test_config)
+ testdata["data_body"] = test_result
+ testdata["testcase"] = testcase
+ return testdata
+
+
+def do_test(test_config, con_dic):
+ test_case = con_dic['runner_config']['yardstick_testcase']
+ test_dict = {
+ "action": "runTestCase",
+ "args": {
+ "opts": {
+ "task-args": test_config
+ },
+ "testcase": test_case
+ }
+ }
+ Task_id = Runner.Send_Data(test_dict, con_dic['runner_config'])
+ time.sleep(con_dic['test_config']['test_time'])
+ Data_Reply = Runner.Get_Reply(con_dic['runner_config'], Task_id)
+ try:
+ test_date =\
+ Data_Reply[con_dic['runner_config']['yardstick_testcase']][0]
+ except IndexError:
+ test_date = do_test(test_config, con_dic)
+
+ save_data = config_to_result(test_config, test_date)
+ if con_dic['runner_config']['dashboard'] == 'y':
+ DashBoard.dashboard_send_data(con_dic['runner_config'], save_data)
+
+ return save_data["data_body"]
+
+
+def run(con_dic):
+    # TODO: allow specifying these ranges from the command line
+ low, high = con_dic['test_config']['num_vnfs']
+ data = {
+ "num_vnfs": range(low, high)
+ }
+ con_dic["result_file"] = os.path.dirname(
+ os.path.abspath(__file__)) + "/test_case/result"
+ pre_role_result = 1
+ data_return = {}
+ data_max = {}
+ data_return["throughput"] = 1
+
+ if con_dic["runner_config"]["yardstick_test_ip"] is None:
+ con_dic["runner_config"]["yardstick_test_ip"] =\
+ conf_parser.ip_parser("yardstick_test_ip")
+
+ env_pre(con_dic)
+
+ if con_dic["runner_config"]["dashboard"] == 'y':
+ if con_dic["runner_config"]["dashboard_ip"] is None:
+ con_dic["runner_config"]["dashboard_ip"] =\
+ conf_parser.ip_parser("dashboard")
+ LOG.info("Create Dashboard data")
+ DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])
+
+ bandwidth_tmp = 1
+    # the number of VNFs is scaled in each iteration
+    for num_vnfs in data["num_vnfs"]:
+ data_max["throughput"] = 1
+ test_config = {
+ "num_vnfs": num_vnfs,
+ "test_time": con_dic['test_config']['test_time']
+ }
+ data_reply = do_test(test_config, con_dic)
+ conf_parser.result_to_file(data_reply, con_dic["out_file"])
+ # TODO: figure out which KPI to use
+ bandwidth = data_reply["throughput"]
+ if data_max["throughput"] < bandwidth:
+ data_max = data_reply
+ if abs(bandwidth_tmp - bandwidth) / float(bandwidth_tmp) < 0.025:
+ LOG.info("this group of data has reached top output")
+ break
+ else:
+ pre_reply = data_reply
+ bandwidth_tmp = bandwidth
+ cur_role_result = float(pre_reply["throughput"])
+ if (abs(pre_role_result - cur_role_result) /
+ float(pre_role_result) < 0.025):
+ LOG.info("The performance increases slowly")
+ if data_return["throughput"] < data_max["throughput"]:
+ data_return = data_max
+ pre_role_result = cur_role_result
+ LOG.info("Find bottlenecks of this config")
+ LOG.info("The max data is %d", data_return["throughput"])
+ return data_return
diff --git a/testsuites/posca/testcase_script/posca_factor_vnf_scale_up.py b/testsuites/posca/testcase_script/posca_factor_vnf_scale_up.py
new file mode 100644
index 00000000..a61104ff
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_factor_vnf_scale_up.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+"""This file realizes the VNF scale-up test.
+It contains two parts: the first part runs the test script,
+and the second is the algorithm that judges where the bottleneck lies.
+This test uses yardstick as the tool to run the test."""
+
+import os
+import time
+import utils.logger as log
+import utils.infra_setup.runner.yardstick as Runner
+from utils.parser import Parser as conf_parser
+import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+
+
+def env_pre(con_dic):
+ Runner.Create_Incluxdb(con_dic['runner_config'])
+
+
+def config_to_result(test_config, test_result):
+ testdata = {}
+ test_result["throughput"] = float(test_result["throughput"])
+ test_result.update(test_config)
+ testdata["data_body"] = test_result
+ testdata["testcase"] = testcase
+ return testdata
+
+
+def do_test(test_config, con_dic):
+    # NOTE(review): the testcase name is expected to change — confirm with runner config
+ test_case = con_dic['runner_config']['yardstick_testcase']
+ test_dict = {
+ "action": "runTestCase",
+ "args": {
+ "opts": {
+ "task-args": test_config
+ },
+ "testcase": test_case
+ }
+ }
+ Task_id = Runner.Send_Data(test_dict, con_dic['runner_config'])
+ time.sleep(con_dic['test_config']['test_time'])
+ Data_Reply = Runner.Get_Reply(con_dic['runner_config'], Task_id)
+ try:
+ test_date =\
+ Data_Reply[con_dic['runner_config']['yardstick_testcase']][0]
+ except IndexError:
+ test_date = do_test(test_config, con_dic)
+
+ save_data = config_to_result(test_config, test_date)
+ if con_dic['runner_config']['dashboard'] == 'y':
+ DashBoard.dashboard_send_data(con_dic['runner_config'], save_data)
+
+ return save_data["data_body"]
+
+
+def run(con_dic):
+ s = con_dic['test_config']['scale_up_values']
+
+ scale_up_values = [
+ (c, m * s['mem_unit']) for c in
+ range(s['cpus_min'], s['cpus_max'], s['cpus_incr'])
+ for m in range(s['mem_min'], s['mem_max'], s['mem_incr'])
+ ]
+ data = {
+ "scale_up_values": scale_up_values
+ }
+ con_dic["result_file"] = os.path.dirname(
+ os.path.abspath(__file__)) + "/test_case/result"
+ pre_role_result = 1
+ data_return = {}
+ data_max = {}
+ data_return["throughput"] = 1
+
+ if con_dic["runner_config"]["yardstick_test_ip"] is None:
+ con_dic["runner_config"]["yardstick_test_ip"] =\
+ conf_parser.ip_parser("yardstick_test_ip")
+
+ env_pre(con_dic)
+
+ if con_dic["runner_config"]["dashboard"] == 'y':
+ if con_dic["runner_config"]["dashboard_ip"] is None:
+ con_dic["runner_config"]["dashboard_ip"] =\
+ conf_parser.ip_parser("dashboard")
+ LOG.info("Create Dashboard data")
+ DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])
+
+ bandwidth_tmp = 1
+ # vcpus and mem are scaled together
+ for vcpus, mem in data["scale_up_values"]:
+ data_max["throughput"] = 1
+ test_config = {
+ "vcpus": vcpus,
+ "mem": mem,
+ "test_time": con_dic['test_config']['test_time']
+ }
+ data_reply = do_test(test_config, con_dic)
+ conf_parser.result_to_file(data_reply, con_dic["out_file"])
+ # TODO: figure out which KPI to use
+ bandwidth = data_reply["throughput"]
+ if data_max["throughput"] < bandwidth:
+ data_max = data_reply
+ if abs(bandwidth_tmp - bandwidth) / float(bandwidth_tmp) < 0.025:
+ LOG.info("this group of data has reached top output")
+ break
+ else:
+ pre_reply = data_reply
+ bandwidth_tmp = bandwidth
+ cur_role_result = float(pre_reply["throughput"])
+ if (abs(pre_role_result - cur_role_result) /
+ float(pre_role_result) < 0.025):
+ LOG.info("The performance increases slowly")
+ if data_return["throughput"] < data_max["throughput"]:
+ data_return = data_max
+ pre_role_result = cur_role_result
+ LOG.info("Find bottlenecks of this config")
+ LOG.info("The max data is %d", data_return["throughput"])
+ return data_return