-rwxr-xr-x  testsuites/posca/run_posca.py                                      |   4
-rwxr-xr-x  testsuites/posca/testcase_cfg/posca_factor_system_bandwidth.yaml   |   2
-rwxr-xr-x  testsuites/posca/testcase_dashboard/system_bandwidth.py            | 204
-rw-r--r--  testsuites/posca/testcase_script/posca_factor_system_bandwidth.py  |  41
-rw-r--r--  utils/parser.py                                                    |  15
5 files changed, 158 insertions(+), 108 deletions(-)
diff --git a/testsuites/posca/run_posca.py b/testsuites/posca/run_posca.py
index a687e00e..72a0d4c2 100755
--- a/testsuites/posca/run_posca.py
+++ b/testsuites/posca/run_posca.py
@@ -22,7 +22,7 @@ INTERPRETER = "/usr/bin/python"
LOG = log.Logger(__name__).getLogger()
# ------------------------------------------------------
-# run posca testcase
+# run a testcase in the POSCA testsuite
# ------------------------------------------------------
@@ -40,6 +40,8 @@ def posca_run(test_level, test_name):
config = conf_parser.Parser.story_read("posca", test_name)
for testcase in config:
LOG.info("Begin to run %s testcase in POSCA testsuite", testcase)
+ config[testcase]['out_file'] =\
+ conf_parser.Parser.testcase_out_dir(testcase)
posca_testcase_run(testcase, config[testcase])
LOG.info("End of %s testcase in POSCA testsuite", testcase)
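For context, the runner loop now derives a per-testcase output file before dispatching each testcase, using the new Parser.testcase_out_dir() helper added in utils/parser.py below. A minimal sketch of that flow, assuming the conf_parser alias used by run_posca.py; the story name is illustrative:

```python
import utils.parser as conf_parser

# Read the story configuration, attach an output-file path per testcase,
# then hand each testcase config on to the runner (the story name is hypothetical).
config = conf_parser.Parser.story_read("posca", "posca_factor_test")
for testcase in config:
    # e.g. "<log_dir>posca_factor_system_bandwidth14_30.out" (testcase name plus HH_MM suffix)
    config[testcase]['out_file'] = conf_parser.Parser.testcase_out_dir(testcase)
```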
diff --git a/testsuites/posca/testcase_cfg/posca_factor_system_bandwidth.yaml b/testsuites/posca/testcase_cfg/posca_factor_system_bandwidth.yaml
index e7238d2c..dcea7275 100755
--- a/testsuites/posca/testcase_cfg/posca_factor_system_bandwidth.yaml
+++ b/testsuites/posca/testcase_cfg/posca_factor_system_bandwidth.yaml
@@ -7,7 +7,7 @@ test_config:
cpu_load: 0.9
latency: 100000
runner_config:
- dashboard: y
+ dashboard: "y"
dashboard_ip:
stack_create: yardstick
yardstick_test_ip:
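The quoting change is deliberate: the test script below compares the flag against the string 'y', and some YAML 1.1 parsers resolve unquoted y/n scalars to booleans. A quick sanity check, assuming PyYAML's safe_load:

```python
import yaml

# With the quoted value the flag always loads as the string "y", which is the
# form the runner compares against; the empty dashboard_ip scalar loads as None.
runner_config = yaml.safe_load('dashboard: "y"\ndashboard_ip:\n')
assert runner_config["dashboard"] == "y"
assert runner_config["dashboard_ip"] is None
```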
diff --git a/testsuites/posca/testcase_dashboard/system_bandwidth.py b/testsuites/posca/testcase_dashboard/system_bandwidth.py
index 63671273..e95ff214 100755
--- a/testsuites/posca/testcase_dashboard/system_bandwidth.py
+++ b/testsuites/posca/testcase_dashboard/system_bandwidth.py
@@ -9,115 +9,121 @@
##############################################################################
import ConfigParser
from elasticsearch import Elasticsearch
-from pyroute2 import IPDB
import json
+import os
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+LOG = log.Logger(__name__).getLogger()
config = ConfigParser.ConfigParser()
+es = Elasticsearch()
+dashboard_dir = os.path.join(conf_parser.test_dir,
+ "posca",
+ "testcase_dashboard")
-dashboard_dir = "/home/opnfv/bottlenecks/testsuites/posca\
-/testcase_dashboard/"
-file_str = "/home/opnfv/bottlenecks/testsuites/posca/\
-testcase_cfg/posca_factor_system_bandwidth.yaml"
-with open(file_str, "rd") as cfgfile:
- config.readfp(cfgfile)
- ES_ip_a = config.get("config", "ES_ip")
+def dashboard_send_data(runner_config, test_data):
+ global es
+ es_ip = runner_config['dashboard_ip'].split(':')
+ es = Elasticsearch([{'host': es_ip[0]}])
+ res = es.index(index="bottlenecks",
+ doc_type=test_data["testcase"],
+ body=test_data["data_body"])
+ if not res['created']:
+ LOG.error("Data sent to Kibana contains errors: %s", test_data["data_body"])
-with IPDB() as ip:
- GATEWAY_IP = ip.routes['default'].gateway
- if ES_ip_a is "":
- ES_ip_a = "{}:9200".format(GATEWAY_IP)
- print("ES_ip is null get local ip is {}".format(ES_ip_a))
-es_ip = ES_ip_a.split(':')
-es = Elasticsearch([{'host': es_ip[0]}])
+def dashboard_system_bandwidth(runner_config):
+ global es
+ es_ip = runner_config['dashboard_ip'].split(':')
+ es = Elasticsearch([{'host': es_ip[0]}])
+ # Create bottlenecks index
+ with open(dashboard_dir + 'posca_system_bandwidth_index_pattern.json')\
+ as index_pattern:
+ doc = json.load(index_pattern)
+ res = es.index(
+ index=".kibana",
+ doc_type="index-pattern",
+ id="bottlenecks",
+ body=doc)
+ if res['created']:
+ LOG.info("bottlenecks index-pattern has been created")
+ else:
+ LOG.info("bottlenecks index-pattern already exists")
-# Create bottlenecks index
-with open(dashboard_dir + 'posca_system_bandwidth\
-_index_pattern.json') as index_pattern:
- doc = json.load(index_pattern)
-res = es.index(
- index=".kibana",
- doc_type="index-pattern",
- id="bottlenecks",
- body=doc)
-if res['created'] == "True":
- print("bottlenecks index-pattern has created")
-else:
- print("bottlenecks index-pattern has existed")
+ with open(dashboard_dir + 'posca_system_bandwidth_config.json')\
+ as index_config:
+ doc = json.load(index_config)
+ res = es.index(index=".kibana", doc_type="config", id="4.6.1", body=doc)
+ if res['created']:
+ LOG.info("bottlenecks config has been created")
+ else:
+ LOG.info("bottlenecks config already exists")
-with open(dashboard_dir + 'posca_system_bandwidth\
-_config.json') as index_config:
- doc = json.load(index_config)
-res = es.index(index=".kibana", doc_type="config", id="4.6.1", body=doc)
-if res['created'] == "True":
- print("bottlenecks config has created")
-else:
- print("bottlenecks config has existed")
+ # Configure discover panel
+ with open(dashboard_dir + 'posca_system_bandwidth_discover.json')\
+ as index_discover:
+ doc = json.load(index_discover)
+ res = es.index(
+ index=".kibana",
+ doc_type="search",
+ id="system_bandwidth",
+ body=doc)
+ if res['created']:
+ LOG.info("system_bandwidth search has been created")
+ else:
+ LOG.info("system_bandwidth search already exists")
-# Configure discover panel
-with open(dashboard_dir + 'posca_system_bandwidth\
-_discover.json') as index_discover:
- doc = json.load(index_discover)
-res = es.index(
- index=".kibana",
- doc_type="search",
- id="system_bandwidth",
- body=doc)
-if res['created'] == "True":
- print("system_bandwidth search has created")
-else:
- print("system_bandwidth search has existed")
+ # Create testing data in line graph
+ with open(dashboard_dir + 'posca_system_bandwidth_line_data.json')\
+ as line_data:
+ doc = json.load(line_data)
+ res = es.index(
+ index=".kibana",
+ doc_type="visualization",
+ id="system_bandwidth_line-date",
+ body=doc)
+ if res['created']:
+ LOG.info("system_bandwidth_line-date visualization has been created")
+ else:
+ LOG.info("system_bandwidth_line-date visualization already exists")
-# Create testing data in line graph
-with open(dashboard_dir + 'posca_system_bandwidth\
-_line_data.json') as line_data:
- doc = json.load(line_data)
-res = es.index(
- index=".kibana",
- doc_type="visualization",
- id="system_bandwidth_line-date",
- body=doc)
-if res['created'] == "True":
- print("system_bandwidth_line-date visualization has created")
-else:
- print("system_bandwidth_line-date visualization has existed")
+ # Create comparison results in line chart
+ with open(dashboard_dir + 'posca_system_bandwidth_line_char.json')\
+ as line_char:
+ doc = json.load(line_char)
+ res = es.index(
+ index=".kibana",
+ doc_type="visualization",
+ id="system_bandwidth_line-char",
+ body=doc)
+ if res['created']:
+ LOG.info("system_bandwidth_line-char visualization has been created")
+ else:
+ LOG.info("system_bandwidth_line-char visualization already exists")
-# Create comparison results in line chart
-with open(dashboard_dir + 'posca_system_bandwidth\
-_line_char.json') as line_char:
- doc = json.load(line_char)
-res = es.index(
- index=".kibana",
- doc_type="visualization",
- id="system_bandwidth_line-char",
- body=doc)
-if res['created'] == "True":
- print("system_bandwidth_line-char visualization has created")
-else:
- print("system_bandwidth_line-char visualization has existed")
+ # Create monitoring data in table
+ with open(dashboard_dir + 'posca_system_bandwidth_terms_data.json')\
+ as terms_char:
+ doc = json.load(terms_char)
+ res = es.index(index=".kibana", doc_type="visualization",
+ id="system_bandwidth_terms_data", body=doc)
+ if res['created']:
+ LOG.info("system_bandwidth_terms_data visualization has been created")
+ else:
+ LOG.info("system_bandwidth_terms_data visualization already exists")
-# Create monitoring data in table
-with open(dashboard_dir + 'posca_system_bandwidth\
-_terms_data.json') as terms_char:
- doc = json.load(terms_char)
-res = es.index(index=".kibana", doc_type="visualization",
- id="system_bandwidth_terms_data", body=doc)
-if res['created'] == "True":
- print("system_bandwidth_terms_data visualization has created")
-else:
- print("system_bandwidth_terms_data visualization has existed")
-
-# Create dashboard
-with open(dashboard_dir + 'posca_system_bandwidth\
-_dashboard.json') as dashboard:
- doc = json.load(dashboard)
-res = es.index(
- index=".kibana",
- doc_type="dashboard",
- id="system_bandwidth_dashboard",
- body=doc)
-if res['created'] == "True":
- print("system_bandwidth dashboard has created")
-else:
- print("system_bandwidth dashboard has existed")
+ # Create dashboard
+ with open(dashboard_dir + 'posca_system_bandwidth_dashboard.json')\
+ as dashboard:
+ doc = json.load(dashboard)
+ res = es.index(
+ index=".kibana",
+ doc_type="dashboard",
+ id="system_bandwidth_dashboard",
+ body=doc)
+ if res['created']:
+ LOG.info("system_bandwidth dashboard has been created")
+ else:
+ LOG.info("system_bandwidth dashboard already exists")
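For reference, dashboard_send_data() expects the {"testcase": ..., "data_body": ...} layout produced by config_to_result() in the test script below. A minimal usage sketch, assuming an Elasticsearch 2.x endpoint reachable at the configured dashboard_ip; the host address and the numbers are illustrative:

```python
from elasticsearch import Elasticsearch

# runner_config['dashboard_ip'] normally supplies the host, e.g. "192.168.1.10:9200".
es = Elasticsearch([{'host': '192.168.1.10'}])

test_data = {
    "testcase": "posca_factor_system_bandwidth",
    "data_body": {"tx_msg_size": 8192.0, "rx_msg_size": 8192.0, "throughput": 5985.4},
}

# Each result becomes one document in the "bottlenecks" index, typed by
# testcase name so the Kibana dashboard can filter on it.
res = es.index(index="bottlenecks",
               doc_type=test_data["testcase"],
               body=test_data["data_body"])
```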
diff --git a/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py b/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
index 4819fb45..01c5dab5 100644
--- a/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
+++ b/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
@@ -17,6 +17,7 @@ import time
import utils.logger as log
import utils.infra_setup.runner.yardstick as Runner
from utils.parser import Parser as conf_parser
+import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
# --------------------------------------------------
# logging configuration
# --------------------------------------------------
@@ -31,12 +32,23 @@ test_dict = {
"testcase": "netperf_bottlenecks"
}
}
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
def env_pre(con_dic):
Runner.Create_Incluxdb(con_dic['runner_config'])
+def config_to_result(test_config, test_result):
+ testdata = {}
+ test_result["throughput"] = float(test_result["throughput"])
+ test_result.update(test_config)
+ testdata["data_body"] = test_result
+ testdata["testcase"] = testcase
+ return testdata
+
+
def do_test(test_config, con_dic):
test_dict['args']['opts']['task-args'] = test_config
Task_id = Runner.Send_Data(test_dict, con_dic['runner_config'])
@@ -47,7 +59,12 @@ def do_test(test_config, con_dic):
Data_Reply[con_dic['runner_config']['yardstick_testcase']][0]
except IndexError:
test_date = do_test(test_config, con_dic)
- return test_date
+
+ save_data = config_to_result(test_config, test_date)
+ if con_dic['runner_config']['dashboard'] == 'y':
+ DashBoard.dashboard_send_data(con_dic['runner_config'], save_data)
+
+ return save_data["data_body"]
def run(con_dic):
@@ -58,17 +75,26 @@ def run(con_dic):
data["tx_pkt_sizes"] = tx_pkt_a
con_dic["result_file"] = os.path.dirname(
os.path.abspath(__file__)) + "/test_case/result"
- date_id = 0
cur_role_result = 1
pre_role_result = 1
pre_reply = {}
data_return = {}
data_max = {}
data_return["throughput"] = 1
+
if con_dic["runner_config"]["yardstick_test_ip"] is None:
con_dic["runner_config"]["yardstick_test_ip"] =\
conf_parser.ip_parser("yardstick_test_ip")
+
env_pre(con_dic)
+
+ if con_dic["runner_config"]["dashboard"] == 'y':
+ if con_dic["runner_config"]["dashboard_ip"] is None:
+ con_dic["runner_config"]["dashboard_ip"] =\
+ conf_parser.ip_parser("dashboard")
+ LOG.info("Create Dashboard data")
+ DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])
+
for test_x in data["tx_pkt_sizes"]:
data_max["throughput"] = 1
bandwidth_tmp = 1
@@ -78,22 +104,23 @@ def run(con_dic):
"rx_msg_size": float(test_y),
"test_time": con_dic['test_config']['test_time']
}
- date_id = date_id + 1
data_reply = do_test(test_config, con_dic)
- bandwidth = float(data_reply["throughput"])
+ conf_parser.result_to_file(data_reply, con_dic["out_file"])
+ bandwidth = data_reply["throughput"]
if (data_max["throughput"] < bandwidth):
data_max = data_reply
if (abs(bandwidth_tmp - bandwidth) / bandwidth_tmp < 0.025):
- print(pre_reply)
+ LOG.info("This group of data has reached its peak throughput")
break
else:
pre_reply = data_reply
bandwidth_tmp = bandwidth
cur_role_result = float(pre_reply["throughput"])
if (abs(pre_role_result - cur_role_result) / pre_role_result < 0.025):
- print("date_id is %d,package return at line 111\n" % date_id)
+ LOG.info("The performance increases slowly")
if data_return["throughput"] < data_max["throughput"]:
data_return = data_max
pre_role_result = cur_role_result
- print("date_id is %d,id return success\n" % date_id)
+ LOG.info("Found the bottleneck for this config")
+ LOG.info("The max throughput is %f", data_return["throughput"])
return data_return
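The run loop above stops stepping up message sizes once throughput gains flatten out, and the outer loop applies the same 2.5% relative-change threshold across packet-size groups. A standalone sketch of that stop condition (not the script's actual helper):

```python
def has_flattened(previous, current, threshold=0.025):
    """Return True when throughput changed by less than `threshold` relative to the previous run."""
    return abs(previous - current) / previous < threshold

# A change from 5985.4 to 6010.1 is roughly 0.4%, so the inner loop would stop here.
assert has_flattened(5985.4, 6010.1)
# A jump from 4000.0 to 5000.0 is 25%, so iteration continues.
assert not has_flattened(4000.0, 5000.0)
```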
diff --git a/utils/parser.py b/utils/parser.py
index a9098a98..b5f29679 100644
--- a/utils/parser.py
+++ b/utils/parser.py
@@ -14,6 +14,8 @@ Second is reading config file.'''
import os
import yaml
+import json
+import time
from pyroute2 import IPDB
@@ -76,6 +78,13 @@ class Parser():
if not os.path.exists(dirname):
os.makedirs(dirname)
+ @classmethod
+ def testcase_out_dir(cls, testcase):
+ file_time = time.strftime('%H_%M', time.localtime(time.time()))
+ out_name = cls.bottlenecks_config["log_dir"] + testcase + file_time
+ outfile_name = out_name + ".out"
+ return outfile_name
+
@staticmethod
def config_parser(testcase_cfg, parameters):
test_cfg = testcase_cfg['test_config']
@@ -93,6 +102,12 @@ class Parser():
TEST_IP = GATEWAY_IP + ":8888"
return TEST_IP
+ @staticmethod
+ def result_to_file(data, file_name):
+ with open(file_name, "a") as f:
+ f.write(json.dumps(data))
+ f.write("\n")
+
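Finally, result_to_file() appends one JSON object per line, so a results file can be read back record by record. A minimal round-trip sketch; the file name stands in for the generated <log_dir><testcase><HH_MM>.out path produced by testcase_out_dir():

```python
import json

def read_results(file_name):
    """Read back the newline-delimited JSON records written by result_to_file()."""
    with open(file_name) as f:
        return [json.loads(line) for line in f if line.strip()]

# Append two records the same way the parser does, then load them again.
with open("example.out", "a") as f:
    for record in ({"throughput": 5985.4, "tx_msg_size": 2048.0},
                   {"throughput": 6010.1, "tx_msg_size": 4096.0}):
        f.write(json.dumps(record))
        f.write("\n")

print(read_results("example.out"))
```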
class HeatTemplate_Parser():
"""parser a Heat template and a method to deploy template to a stack"""