summaryrefslogtreecommitdiffstats
path: root/testsuites
diff options
context:
space:
mode:
Diffstat (limited to 'testsuites')
-rw-r--r--testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml35
-rw-r--r--testsuites/posca/testcase_cfg/posca_feature_vnf_scale_out.yaml (renamed from testsuites/posca/testcase_cfg/posca_factor_vnf_scale_out.yaml)25
-rw-r--r--testsuites/posca/testcase_dashboard/posca_stress_ping.py2
-rwxr-xr-xtestsuites/posca/testcase_dashboard/posca_vnf_scale_out.py35
-rwxr-xr-xtestsuites/posca/testcase_dashboard/system_bandwidth.py2
-rw-r--r--testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py125
-rw-r--r--testsuites/posca/testcase_script/posca_feature_moon_tenants.py166
-rw-r--r--testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py151
-rw-r--r--testsuites/run_testsuite.py2
9 files changed, 406 insertions, 137 deletions
diff --git a/testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml b/testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml
new file mode 100644
index 00000000..3b621a99
--- /dev/null
+++ b/testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+load_manager:
+ scenarios:
+ tool: https request
+    # info: the cpus and mems lists must have the same number of entries.
+ resources: 20
+ initial: 0
+ threshhold: 5
+ timeout: 30
+ SLA: 5
+
+
+ runners:
+ stack_create: yardstick
+ Debug: False
+ yardstick_test_dir: "samples"
+ yardstick_testcase: "bottlenecks_moon_tenants"
+
+ runner_exta:
+    # info: this section is for yardstick to do some extra env preparation.
+ installation_method: yardstick
+ installation_type: testpmd
+
+contexts:
+  # info: if the dashboard has data, we will create the data dashboard.
+ dashboard: "Bottlenecks-ELK"
+ yardstick: "Bottlenecks-yardstick" \ No newline at end of file
diff --git a/testsuites/posca/testcase_cfg/posca_factor_vnf_scale_out.yaml b/testsuites/posca/testcase_cfg/posca_feature_vnf_scale_out.yaml
index 84bde99d..d893ac8a 100644
--- a/testsuites/posca/testcase_cfg/posca_factor_vnf_scale_out.yaml
+++ b/testsuites/posca/testcase_cfg/posca_feature_vnf_scale_out.yaml
@@ -7,12 +7,19 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-test_config:
- num_vnfs: [1, 40]
-runner_config:
- dashboard: "y"
- dashboard_ip:
- stack_create: yardstick
- yardstick_test_ip:
- yardstick_test_dir: "samples/vnf_samples/nsut/acl"
- yardstick_testcase: "tc_heat_rfc2544_ipv4_1rule_1flow_64B_packetsize_scale_out.yaml"
+load_manager:
+ scenarios:
+ number_vnfs: 1, 2, 4
+ iterations: 10
+ interval: 35
+
+ runners:
+ stack_create: yardstick
+ flavor:
+ yardstick_test_dir: "samples/vnf_samples/nsut/acl"
+ yardstick_testcase: "tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_correlated_traffic_scale_out"
+
+contexts:
+ dashboard: "Bottlenecks-ELK"
+ yardstick: "Bottlenecks_yardstick"
+ yardstick_envpre: False
diff --git a/testsuites/posca/testcase_dashboard/posca_stress_ping.py b/testsuites/posca/testcase_dashboard/posca_stress_ping.py
index 7a5a8fb8..64ce3835 100644
--- a/testsuites/posca/testcase_dashboard/posca_stress_ping.py
+++ b/testsuites/posca/testcase_dashboard/posca_stress_ping.py
@@ -32,7 +32,7 @@ def dashboard_send_data(runner_config, test_data):
doc_type=test_data["testcase"],
body=test_data["data_body"])
if res['created'] == "False":
- LOG.error("date send to kibana have errors ", test_data["data_body"])
+ LOG.error("date send to kibana have errors %s", test_data["data_body"])
def posca_stress_ping(runner_config):
diff --git a/testsuites/posca/testcase_dashboard/posca_vnf_scale_out.py b/testsuites/posca/testcase_dashboard/posca_vnf_scale_out.py
new file mode 100755
index 00000000..6720b7f0
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_vnf_scale_out.py
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import ConfigParser
+from elasticsearch import Elasticsearch
+import os
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+
+LOG = log.Logger(__name__).getLogger()
+config = ConfigParser.ConfigParser()
+es = Elasticsearch()
+dashboard_path = os.path.join(conf_parser.test_dir,
+ "posca",
+ "testcase_dashboard")
+dashboard_dir = dashboard_path + "/"
+
+
+def dashboard_send_data(runner_config, test_data):
+ global es
+ # es_ip = runner_config['dashboard_ip'].split(':')
+ es = Elasticsearch([{'host': "172.17.0.5"}])
+ for i in test_data:
+ res = es.index(index="bottlenecks",
+ doc_type="vnf_scale_out",
+ body=i)
+ if res['created'] == "False":
+ LOG.error("date send to kibana have errors %s",
+ test_data["data_body"])
diff --git a/testsuites/posca/testcase_dashboard/system_bandwidth.py b/testsuites/posca/testcase_dashboard/system_bandwidth.py
index 4501dee7..5479b670 100755
--- a/testsuites/posca/testcase_dashboard/system_bandwidth.py
+++ b/testsuites/posca/testcase_dashboard/system_bandwidth.py
@@ -31,7 +31,7 @@ def dashboard_send_data(runner_config, test_data):
doc_type=test_data["testcase"],
body=test_data["data_body"])
if res['created'] == "False":
- LOG.error("date send to kibana have errors ", test_data["data_body"])
+ LOG.error("date send to kibana have errors %s", test_data["data_body"])
def dashboard_system_bandwidth(runner_config):
diff --git a/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py b/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py
deleted file mode 100644
index 2241d02f..00000000
--- a/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-"""This file realize the function of run systembandwidth script.
-for example this contain two part first run_script,
-second is algorithm, this part is about how to judge the bottlenecks.
-This test is using yardstick as a tool to begin test."""
-
-import os
-import time
-import utils.logger as log
-import utils.infra_setup.runner.yardstick as Runner
-from utils.parser import Parser as conf_parser
-import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
-# --------------------------------------------------
-# logging configuration
-# --------------------------------------------------
-LOG = log.Logger(__name__).getLogger()
-
-testfile = os.path.basename(__file__)
-testcase, file_format = os.path.splitext(testfile)
-
-
-def env_pre(con_dic):
- Runner.Create_Incluxdb(con_dic['runner_config'])
-
-
-def config_to_result(test_config, test_result):
- testdata = {}
- test_result["throughput"] = float(test_result["throughput"])
- test_result.update(test_config)
- testdata["data_body"] = test_result
- testdata["testcase"] = testcase
- return testdata
-
-
-def do_test(test_config, con_dic):
- test_case = con_dic['runner_config']['yardstick_testcase']
- test_dict = {
- "action": "runTestCase",
- "args": {
- "opts": {
- "task-args": test_config
- },
- "testcase": test_case
- }
- }
- Task_id = Runner.Send_Data(test_dict, con_dic['runner_config'])
- time.sleep(con_dic['test_config']['test_time'])
- Data_Reply = Runner.Get_Reply(con_dic['runner_config'], Task_id)
- try:
- test_date =\
- Data_Reply[con_dic['runner_config']['yardstick_testcase']][0]
- except IndexError:
- test_date = do_test(test_config, con_dic)
-
- save_data = config_to_result(test_config, test_date)
- if con_dic['runner_config']['dashboard'] == 'y':
- DashBoard.dashboard_send_data(con_dic['runner_config'], save_data)
-
- return save_data["data_body"]
-
-
-def run(con_dic):
- # can we specify these ranges from command line?
- low, high = con_dic['test_config']['num_vnfs']
- data = {
- "num_vnfs": range(low, high)
- }
- con_dic["result_file"] = os.path.dirname(
- os.path.abspath(__file__)) + "/test_case/result"
- pre_role_result = 1
- data_return = {}
- data_max = {}
- data_return["throughput"] = 1
-
- if con_dic["runner_config"]["yardstick_test_ip"] is None:
- con_dic["runner_config"]["yardstick_test_ip"] =\
- conf_parser.ip_parser("yardstick_test_ip")
-
- env_pre(con_dic)
-
- if con_dic["runner_config"]["dashboard"] == 'y':
- if con_dic["runner_config"]["dashboard_ip"] is None:
- con_dic["runner_config"]["dashboard_ip"] =\
- conf_parser.ip_parser("dashboard")
- LOG.info("Create Dashboard data")
- DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])
-
- bandwidth_tmp = 1
- # vcpus and mem are scaled together
- for num_vnfs in data["scale_up_values"]:
- data_max["throughput"] = 1
- test_config = {
- "num_vnfs": num_vnfs,
- "test_time": con_dic['test_config']['test_time']
- }
- data_reply = do_test(test_config, con_dic)
- conf_parser.result_to_file(data_reply, con_dic["out_file"])
- # TODO: figure out which KPI to use
- bandwidth = data_reply["throughput"]
- if data_max["throughput"] < bandwidth:
- data_max = data_reply
- if abs(bandwidth_tmp - bandwidth) / float(bandwidth_tmp) < 0.025:
- LOG.info("this group of data has reached top output")
- break
- else:
- pre_reply = data_reply
- bandwidth_tmp = bandwidth
- cur_role_result = float(pre_reply["throughput"])
- if (abs(pre_role_result - cur_role_result) /
- float(pre_role_result) < 0.025):
- LOG.info("The performance increases slowly")
- if data_return["throughput"] < data_max["throughput"]:
- data_return = data_max
- pre_role_result = cur_role_result
- LOG.info("Find bottlenecks of this config")
- LOG.info("The max data is %d", data_return["throughput"])
- return data_return
diff --git a/testsuites/posca/testcase_script/posca_feature_moon_tenants.py b/testsuites/posca/testcase_script/posca_feature_moon_tenants.py
new file mode 100644
index 00000000..8f4061df
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_feature_moon_tenants.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realize the function of run systembandwidth script.
+for example this contain two part first run_script,
+second is algorithm, this part is about how to judge the bottlenecks.
+This test is using yardstick as a tool to begin test.'''
+
+import os
+import time
+import uuid
+import json
+import Queue
+import multiprocessing
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.stack_prepare as stack_prepare
+import utils.infra_setup.runner.docker_env as docker_env
+import utils.infra_setup.runner.yardstick as yardstick_task
+
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+# cidr = "/home/opnfv/repos/yardstick/samples/pvp_throughput_bottlenecks.yaml"
+runner_switch = True
+runner_DEBUG = True
+
+
+def env_pre(con_dic):
+ LOG.info("yardstick environment prepare!")
+ stack_prepare._prepare_env_daemon(True)
+
+
+def config_to_result(test_config, test_result):
+ final_data = []
+ print(test_result)
+ out_data = test_result["result"]["testcases"]
+ test_data = out_data["pvp_throughput_bottlenecks"]["tc_data"]
+ for result in test_data:
+ testdata = {}
+ testdata["vcpu"] = test_config["vcpu"]
+ testdata["memory"] = test_config["memory"]
+ testdata["nrFlows"] = result["data"]["nrFlows"]
+ testdata["packet_size"] = result["data"]["packet_size"]
+ testdata["throughput"] = result["data"]["throughput_rx_mbps"]
+ final_data.append(testdata)
+ return final_data
+
+
+def testcase_parser(runner_conf, out_file="yardstick.out", **parameter_info):
+ cidr = "/home/opnfv/repos/yardstick/" + \
+ runner_conf["yardstick_test_dir"] + \
+ runner_conf["yardstick_testcase"]
+ cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+ cidr=cidr,
+ outfile=out_file,
+ parameter=parameter_info)
+ return cmd
+
+
+def do_test(runner_conf, test_config, Use_Dashboard, context_conf):
+ yardstick_container = docker_env.yardstick_info['container']
+ out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+ cmd = testcase_parser(runner_conf, out_file=out_file, **test_config)
+ print(cmd)
+ stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+ LOG.info(stdout)
+ loop_value = 0
+ while loop_value < 60:
+ time.sleep(2)
+ loop_value = loop_value + 1
+ with open(out_file) as f:
+ data = json.load(f)
+ if data["status"] == 1:
+ LOG.info("yardstick run success")
+ break
+ elif data["status"] == 2:
+ LOG.error("yardstick error exit")
+ exit()
+ # data = json.load(output)
+
+ save_data = config_to_result(test_config, data)
+ if Use_Dashboard is True:
+ print("use dashboard")
+ # DashBoard.dashboard_send_data(context_conf, save_data)
+
+ # return save_data["data_body"]
+ return save_data
+
+
+def run(test_config):
+ load_config = test_config["load_manager"]
+ scenarios_conf = load_config["scenarios"]
+ runner_conf = test_config["runners"]
+ Use_Dashboard = False
+
+ env_pre(None)
+ if test_config["contexts"]["yardstick_ip"] is None:
+ load_config["contexts"]["yardstick_ip"] =\
+ conf_parser.ip_parser("yardstick_test_ip")
+
+ if "dashboard" in test_config["contexts"].keys():
+ if test_config["contexts"]["dashboard_ip"] is None:
+ test_config["contexts"]["dashboard_ip"] =\
+ conf_parser.ip_parser("dashboard")
+ LOG.info("Create Dashboard data")
+ Use_Dashboard = True
+ # DashBoard.dashboard_system_bandwidth(test_config["contexts"])
+
+ resources = conf_parser.str_to_list(scenarios_conf["resources"])
+ initial = conf_parser.str_to_list(scenarios_conf["initial"])
+ threshhold = conf_parser.str_to_list(scenarios_conf["threshhold"])
+ timeout = conf_parser.str_to_list(scenarios_conf["timeout"])
+ SLA = conf_parser.str_to_list(scenarios_conf["SLA"])
+ case_config = {"SLA": SLA,
+ "resources": resources}
+
+ process_queue = Queue.Queue()
+
+ load_config["result_file"] = os.path.dirname(
+ os.path.abspath(__file__)) + "/test_case/result"
+
+ result = 0
+
+    if initial == 0:
+ tenant_number = threshhold
+ else:
+ tenant_number = initial
+
+ while runner_switch is True:
+ for tenant in range(0, tenant_number):
+ process = multiprocessing.Process(target=do_test,
+ args=(runner_conf,
+ case_config,
+ Use_Dashboard,
+ test_config["contexts"],
+ ))
+ process.start()
+ process_queue.put(process)
+
+ result = result + tenant_number
+ tenant_number = threshhold
+ time.sleep(timeout)
+
+ while process_queue.qsize():
+ process = process_queue.get()
+ process.terminate()
+
+    if result == initial:
+ result = 0
+ else:
+ result = result - threshhold
+
+ LOG.info("Finished bottlenecks testcase")
+ LOG.info("The result data is %s", result)
+ return result
diff --git a/testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py b/testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py
new file mode 100644
index 00000000..6d53515f
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+"""This file realize the function of run systembandwidth script.
+for example this contain two part first run_script,
+second is algorithm, this part is about how to judge the bottlenecks.
+This test is using yardstick as a tool to begin test."""
+
+import utils.logger as log
+import uuid
+import json
+import os
+import time
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.quota_prepare as quota_prepare
+import utils.env_prepare.stack_prepare as stack_prepare
+
+import utils.infra_setup.runner.docker_env as docker_env
+import utils.infra_setup.runner.yardstick as yardstick_task
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+
+testcase_name = ("tc_heat_rfc2544_ipv4_1rule_"
+ "1flow_64B_trex_correlated_traffic_scale_out")
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+cidr = ("/home/opnfv/repos/yardstick/samples/vnf_samples/nsut/acl/"
+ "tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_correlated_"
+ "traffic_scale_out.yaml")
+runner_DEBUG = True
+
+
+def env_pre(test_config):
+ test_yardstick = False
+ if "yardstick" in test_config["contexts"].keys():
+ test_yardstick = True
+ print(test_yardstick)
+ stack_prepare._prepare_env_daemon(test_yardstick)
+ quota_prepare.quota_env_prepare()
+ cmd = ('yardstick env prepare')
+ LOG.info("yardstick environment prepare!")
+ print docker_env.yardstick_info['container']
+ if(test_config["contexts"]['yardstick_envpre']):
+ yardstick_container = docker_env.yardstick_info['container']
+ stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+ LOG.debug(stdout)
+
+
+def config_to_result(test_config, test_result):
+ final_data = []
+ print(test_result)
+ out_data = test_result["result"]["testcases"]
+ test_data = out_data[testcase_name]["tc_data"]
+ for result in test_data:
+ testdata = {}
+ testdata["sequence"] = result["sequence"]
+ traffic_result = result["data"]["tg__0"]
+ if traffic_result:
+ testdata["RxThroughput"] = traffic_result["RxThroughput"]
+ testdata["TxThroughput"] = traffic_result["TxThroughput"]
+ testdata["DropPercentage"] = traffic_result["DropPercentage"]
+ final_data.append(testdata)
+ return final_data
+
+
+def testcase_parser(out_file="yardstick.out", **parameter_info):
+ cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+ cidr=cidr,
+ outfile=out_file,
+ parameter=parameter_info)
+ return cmd
+
+
+def do_test(test_config, Use_Dashboard, context_conf):
+ yardstick_container = docker_env.yardstick_info['container']
+ out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+ cmd = testcase_parser(out_file=out_file, **test_config)
+ print(cmd)
+ stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+ LOG.info(stdout)
+ loop_value = 0
+ while loop_value < 60:
+ time.sleep(2)
+ loop_value = loop_value + 1
+ with open(out_file) as f:
+ data = json.load(f)
+ if data["status"] == 1:
+ LOG.info("yardstick run success")
+ break
+ elif data["status"] == 2:
+ LOG.error("yardstick error exit")
+ exit()
+ # data = json.load(output)
+
+ save_data = config_to_result(test_config, data)
+ print("^^^^^^^^^^^^^^^^^^^^^^^^^")
+ print save_data
+ if Use_Dashboard is True:
+ print("use dashboard")
+ # DashBoard.dashboard_send_data(context_conf, save_data)
+
+ # return save_data["data_body"]
+ return save_data
+
+
+def run(test_config):
+ print test_config
+ load_config = test_config["load_manager"]
+ scenarios_conf = load_config["scenarios"]
+ Use_Dashboard = True
+ env_pre(test_config)
+ if test_config["contexts"]["yardstick_ip"] is None:
+ load_config["contexts"]["yardstick_ip"] =\
+ conf_parser.ip_parser("yardstick_test_ip")
+
+ if "dashboard" in test_config["contexts"].keys():
+ if test_config["contexts"]["dashboard_ip"] is None:
+ test_config["contexts"]["dashboard_ip"] =\
+ conf_parser.ip_parser("dashboard")
+ LOG.info("Create Dashboard data")
+ Use_Dashboard = True
+
+ num_vnfs = conf_parser.str_to_list(scenarios_conf["number_vnfs"])
+ iterations = scenarios_conf["iterations"]
+ interval = scenarios_conf["interval"]
+ load_config["result_file"] = os.path.dirname(
+ os.path.abspath(__file__)) + "/test_case/result"
+
+ result = []
+
+ for i in range(0, len(num_vnfs)):
+ print i
+ case_config = {"num_vnfs": int(num_vnfs[i]),
+ "iterations": iterations,
+ "interval": interval}
+ data_reply = do_test(case_config, Use_Dashboard,
+ test_config["contexts"])
+ result.append(data_reply)
+
+ LOG.info("Finished bottlenecks testcase")
+ LOG.info("The result data is %s", result)
+ return result
diff --git a/testsuites/run_testsuite.py b/testsuites/run_testsuite.py
index e7276689..2e82b205 100644
--- a/testsuites/run_testsuite.py
+++ b/testsuites/run_testsuite.py
@@ -110,7 +110,7 @@ def testsuite_run(test_level, test_name, REPORT="False"):
try:
posca_testcase_run(tester_parser[0], testcase, config[testcase])
except Exception, e:
- LOG.warning('e.message:\t', e.message)
+ LOG.warning('e.message:\t%s', e.message)
stop_date = datetime.datetime.now()
LOG.info("End of %s testcase in POSCA testsuite", testcase)
criteria = "FAIL"