Diffstat (limited to 'testsuites')
-rw-r--r--  testsuites/posca/testcase_cfg/posca_factor_tx_cache_size.yaml | 11
-rw-r--r--  testsuites/posca/testcase_cfg/posca_factor_tx_pkt_size.yaml | 12
-rw-r--r--  testsuites/posca/testcase_script/posca_factor_tx_cache_size.py | 117
-rw-r--r--  testsuites/posca/testcase_script/posca_factor_tx_pkt_size.py | 122
-rwxr-xr-x  testsuites/rubbos/run_rubbos.py | 257
-rwxr-xr-x  testsuites/vstf/run_vstf.py | 173
-rwxr-xr-x  testsuites/vstf/vstf_collector.py | 29
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/agent.py | 24
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/basic/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/basic/collect.py | 44
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/basic/commandline.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/basic/device_manager.py | 13
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/basic/image_manager.py | 52
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/basic/source_manager.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm9pfs.py | 17
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_manager.py | 37
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_xml_help.py | 5
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/builder.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/__init__.py | 2
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/manager.py | 4
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/model.py | 6
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/origin_driver.py | 4
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/FSMonitor.py | 62
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/constant.py | 2
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/utils.py | 10
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/libvirt_plugin.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/tester_env_plugin.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/bridge_plugin.py | 5
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/manager.py | 4
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/model.py | 2
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/ovs_plugin.py | 43
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/equalizer/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/equalizer/equalizer.py | 10
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/equalizer/get_info.py | 19
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/equalizer/optimize.py | 13
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/affctl.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/iperf.py | 16
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/netmap.py | 16
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/netns.py | 6
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/netperf.py | 16
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/pktgen.py | 15
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/qperf.py | 16
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/sar.py | 10
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/utils.py | 4
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/vnstat.py | 5
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/perf/vstfperf.py | 6
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/softagent.py | 14
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/spirent/spirent.py | 267
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/spirent/tools.py | 424
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/agent/spirentagent.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/candy_text.py | 4
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/cfgparser.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/cliutil.py | 3
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/cmds.py | 4
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/daemon.py | 32
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/decorator.py | 11
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/log.py | 5
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/pyhtml.py | 133
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/rsync.py | 93
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/saltstack.py | 50
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/ssh.py | 33
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/test_func.py | 2
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/unix.py | 34
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/utils.py | 16
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/common/vstfcli.py | 13
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/api_server.py | 75
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/database/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/database/dbinterface.py | 388
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/database/tables.py | 13
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/env_build/cfg_intent_parse.py | 5
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_build.py | 6
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_collect.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/fabricant.py | 34
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/mail.py | 10
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/sendmail.py | 9
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/__init__.py | 2
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/candy_generator.py | 10
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/data_factory.py | 155
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/html_base.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/htmlcreator.py | 6
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/element.py | 70
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdfcreator.py | 6
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdftemplate.py | 88
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/story.py | 50
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/html_provider.py | 8
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/pdf_provider.py | 8
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/reporters/reporter.py | 11
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/cpu_settings.py | 24
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/device_settings.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/flows_settings.py | 44
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/forwarding_settings.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/html_settings.py | 16
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/mail_settings.py | 29
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/perf_settings.py | 21
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/settings.py | 79
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/template_settings.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/tester_settings.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/settings/tool_settings.py | 23
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/spirent/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/spirent/appliance.py | 10
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/model.py | 168
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/result_analysis.py | 10
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/flow_producer.py | 11
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/model.py | 16
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/perf_provider.py | 52
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/performance.py | 54
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/raw_data.py | 14
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/unittest/__init__.py | 1
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/unittest/configuration.py | 2
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/unittest/model.py | 2
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_cfg_intent_parse.py | 3
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_collect.py | 36
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_driver_function.py | 17
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_env_build.py | 27
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_perf.py | 14
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_ssh.py | 6
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/controller/vstfadm.py | 115
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/__init__.py | 2
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_consumer.py | 31
-rw-r--r--  testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_producer.py | 34
130 files changed, 2975 insertions, 1215 deletions
diff --git a/testsuites/posca/testcase_cfg/posca_factor_tx_cache_size.yaml b/testsuites/posca/testcase_cfg/posca_factor_tx_cache_size.yaml
new file mode 100644
index 00000000..f2379bcc
--- /dev/null
+++ b/testsuites/posca/testcase_cfg/posca_factor_tx_cache_size.yaml
@@ -0,0 +1,11 @@
+[config]
+test_ip: 192.168.23.2:8888
+throughput: 1000
+tool: netperf
+protocol: tcp
+test_time: 30
+pkt sizes: 8,16,32,64,128,256,512,1024,2048
+tx cache sizes: 2304
+rx cache sizes: 1152,2304,4608,9216,18432,32768,65536,87380,131072
+cpu load: 0.9
+latency: 100000
diff --git a/testsuites/posca/testcase_cfg/posca_factor_tx_pkt_size.yaml b/testsuites/posca/testcase_cfg/posca_factor_tx_pkt_size.yaml
new file mode 100644
index 00000000..dcceee02
--- /dev/null
+++ b/testsuites/posca/testcase_cfg/posca_factor_tx_pkt_size.yaml
@@ -0,0 +1,12 @@
+[config]
+test_ip: 192.168.23.2:8888
+throughput: 1000
+tool: netperf
+protocol: tcp
+test_time: 30
+tx pkt sizes: 8
+rx pkt sizes: 8,16,32,64,128,256,512,1024,2048
+tx cache sizes: 2304,4608,9216,18432,32768,65536
+rx cache sizes: 1152,2304,4608,9216,18432,32768,65536,131072
+cpu load: 0.9
+latency: 100000
diff --git a/testsuites/posca/testcase_script/posca_factor_tx_cache_size.py b/testsuites/posca/testcase_script/posca_factor_tx_cache_size.py
new file mode 100644
index 00000000..525f91de
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_factor_tx_cache_size.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+import argparse
+import time
+import logging
+import ConfigParser
+import json
+
+#------------------------------------------------------
+# parser for configuration files in each test case
+# ------------------------------------------------------
+parser = argparse.ArgumentParser()
+parser.add_argument("-c", "--conf",
+ help="configuration files for the testcase, in yaml format",
+ default="/home/opnfv/bottlenecks/testsuites/posca/testcase_cfg/posca_factor_tx_cache_size.yaml")
+args = parser.parse_args()
+
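+# curl fragments for the Yardstick REST API: the POST body asks Yardstick to
+# start a netperf task (the full URL http://<test_ip>/api/v3/yardstick/tasks/task
+# is built in posca_run below).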
+cmd="curl -i"
+order_arg="-H \"Content-Type: application/json\" -X POST -d \'{\"cmd\": \"start\", \"opts\":{\"output-file\": \"/tem/yardstick.out\"}, \"args\": \"../samples/netperf.yaml\"}'"
+
+#--------------------------------------------------
+# logging configuration
+#--------------------------------------------------
+logger = logging.getLogger(__name__)
+
+def posca_env_check():
+ print "========== posca system bandwidth env check ==========="
+ if os.path.exists(r'/home/opnfv/bottlenecks/testsuites/posca/test_result/'):
+ return True
+ else:
+ os.makedirs(r'/home/opnfv/bottlenecks/testsuites/posca/test_result/')
+
+def posca_output_result(time_new,input_1,input_2,input_3,input_4,input_5):
+ save_dic={}
+ save_dic['tx_pkt_size']=input_1
+ save_dic['rx_cache_size']=input_2
+ save_dic['throughput']=input_3
+ save_dic['latency']=input_4
+ save_dic['cpu_load']=input_5
+ with open("/home/opnfv/bottlenecks/testsuites/posca/test_result/factor_tx_cache_size_%s.json"%(time_new),"a") as f:
+ f.write(json.dumps(save_dic))
+ f.write("\n")
+
+def posca_config_read(config_str):
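+ # Read the test parameters from the config file; despite the .yaml extension
+ # the file is INI-style (a [config] section) and is parsed with ConfigParser.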
+ print "========== posca system bandwidth config read ==========="
+
+ con_dic = {}
+ config = ConfigParser.ConfigParser()
+ with open(config_str,"r") as cfgfile:
+ config.readfp(cfgfile)
+ con_dic['test_ip']=config.get("config","test_ip")
+ con_dic['test_throughput']=config.getfloat("config","throughput")
+ con_dic['test_tool']=config.get("config","tool")
+ con_dic['test_time']=config.getfloat("config","test_time")
+ con_dic['test_protocol']=config.get("config","protocol")
+ con_dic['test_pkt_s']=config.get("config","pkt sizes")
+ con_dic['test_tx_cache_s']=config.get("config","tx cache sizes")
+ con_dic['test_rx_cache_s']=config.get("config","rx cache sizes")
+ con_dic['test_cpu_load']=config.getfloat("config","cpu load")
+ con_dic['test_latency']=config.getfloat("config","latency")
+
+ return con_dic
+
+def posca_run(con_dic):
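+ # Sweep every combination of packet size and rx cache size: start a netperf
+ # task through the Yardstick API, wait test_time seconds, fetch the results,
+ # record throughput/latency/cpu load, and stop once the measured bandwidth
+ # stabilises within 5% while latency and cpu load stay under the configured limits.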
+ print "========== run posca system bandwidth ==========="
+
+ test_pkt_s_a = con_dic['test_pkt_s'].split(',')
+ test_rx_cache_s_a = con_dic['test_rx_cache_s'].split(',')
+ time_new = time.strftime('%H_%M',time.localtime(time.time()))
+ bandwidth_tmp = 1
+
+ for test_pkt_s_e in test_pkt_s_a:
+ for test_rx_cache_s_e in test_rx_cache_s_a:
+ print "Package size %s"%(test_pkt_s_e)
+ order_excute = os.popen("%s %s http://%s/api/v3/yardstick/tasks/task %s %s %s %s"%(cmd,order_arg,con_dic['test_ip'],test_pkt_s_e,test_pkt_s_e,con_dic['test_tx_cache_s'],test_rx_cache_s_e))
+ order_result = order_excute.read()
+ test_id = order_result.find("task_id")
+ time.sleep(con_dic['test_time'])
+ cmd_excute = os.popen( "%s http://%s/api/v3/yardstick/testresults?task_id=%s"%(cmd,con_dic['test_ip'],test_id))
+ test_result = cmd_excute.read()
+ bandwidth = test_result.find("bandwidth")
+ cpu_load = test_result.find("cpu_load")
+ latency = test_result.find("latency")
+ posca_output_result(time_new,test_pkt_s_e,test_rx_cache_s_e,bandwidth,latency,cpu_load)
+ if (abs(bandwidth-con_dic['test_throughput'])/con_dic['test_throughput'] >0.05) and (latency < con_dic['test_latency']) and (cpu_load < con_dic['test_cpu_load']):
+ if abs(bandwidth_tmp-bandwidth)/bandwidth <0.05:
+ return True
+ else:
+ bandwidth_tmp = bandwidth
+ print "%s,%s"%(bandwidth,test_rx_cache_s_e)
+ else:
+ print "%s,%s"%(bandwidth,test_rx_cache_s_e)
+ return False
+
+def main():
+ if not (args.conf):
+ logger.error("Configuration files do not exist for the specified testcases")
+ exit(-1)
+ else:
+ testcase_cfg = args.conf
+
+ con_dic=posca_config_read(testcase_cfg)
+ posca_env_check()
+ posca_run(con_dic)
+
+ time.sleep(5)
+
+if __name__=='__main__':
+ main()
diff --git a/testsuites/posca/testcase_script/posca_factor_tx_pkt_size.py b/testsuites/posca/testcase_script/posca_factor_tx_pkt_size.py
new file mode 100644
index 00000000..4b44c853
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_factor_tx_pkt_size.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+import argparse
+import time
+import logging
+import ConfigParser
+import json
+
+#------------------------------------------------------
+# parser for configuration files in each test case
+# ------------------------------------------------------
+parser = argparse.ArgumentParser()
+parser.add_argument("-c", "--conf",
+ help="configuration files for the testcase, in yaml format",
+ default="/home/opnfv/bottlenecks/testsuites/posca/testcase_cfg/posca_factor_tx_pkt_size.yaml")
+args = parser.parse_args()
+
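+# curl fragments for the Yardstick REST API: the POST body asks Yardstick to
+# start a netperf task (the full URL http://<test_ip>/api/v3/yardstick/tasks/task
+# is built in posca_run below).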
+cmd="curl -i"
+order_arg="-H \"Content-Type: application/json\" -X POST -d \'{\"cmd\": \"start\", \"opts\":{\"output-file\": \"/tem/yardstick.out\"}, \"args\": \"../samples/netperf.yaml\"}'"
+
+#--------------------------------------------------
+# logging configuration
+#--------------------------------------------------
+logger = logging.getLogger(__name__)
+
+def posca_env_check():
+ print "========== posca system bandwidth env check ==========="
+ if os.path.exists(r'/home/opnfv/bottlenecks/testsuites/posca/test_result/'):
+ return True
+ else:
+ os.makedirs(r'/home/opnfv/bottlenecks/testsuites/posca/test_result/')
+
+def posca_output_result(time_new,input_1,input_2,input_3,input_4,input_5,input_6):
+ save_dic={}
+ save_dic['rx_pkt_size']=input_1
+ save_dic['rx_cache_size']=input_2
+ save_dic['tx_cache_size']=input_3
+ save_dic['throughput']=input_4
+ save_dic['latency']=input_5
+ save_dic['cpu_load']=input_6
+ with open("/home/opnfv/bottlenecks/testsuites/posca/test_result/factor_tx_pkt_size_%s.json"%(time_new),"a") as f:
+ f.write(json.dumps(save_dic))
+ f.write("\n")
+
+def posca_config_read(config_str):
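+ # Read the test parameters from the config file; despite the .yaml extension
+ # the file is INI-style (a [config] section) and is parsed with ConfigParser.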
+ print "========== posca system bandwidth config read ==========="
+
+ con_dic = {}
+ config = ConfigParser.ConfigParser()
+ with open(config_str,"r") as cfgfile:
+ config.readfp(cfgfile)
+ con_dic['test_ip']=config.get("config","test_ip")
+ con_dic['test_throughput']=config.getfloat("config","throughput")
+ con_dic['test_tool']=config.get("config","tool")
+ con_dic['test_time']=config.getfloat("config","test_time")
+ con_dic['test_protocol']=config.get("config","protocol")
+ con_dic['test_tx_pkt_s']=config.get("config","tx pkt sizes")
+ con_dic['test_rx_pkt_s']=config.get("config","rx pkt sizes")
+ con_dic['test_tx_cache_s']=config.get("config","tx cache sizes")
+ con_dic['test_rx_cache_s']=config.get("config","rx cache sizes")
+ con_dic['test_cpu_load']=config.getfloat("config","cpu load")
+ con_dic['test_latency']=config.getfloat("config","latency")
+
+ return con_dic
+
+def posca_run(con_dic):
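+ # Sweep rx packet sizes, tx cache sizes and rx cache sizes: start a netperf
+ # task through the Yardstick API for each combination, wait test_time seconds,
+ # fetch the results, record throughput/latency/cpu load, and stop once the
+ # measured bandwidth stabilises within 5% while latency and cpu load stay
+ # under the configured limits.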
+ print "========== run posca system bandwidth ==========="
+
+ test_rx_pkt_s_a = con_dic['test_rx_pkt_s'].split(',')
+ test_tx_cache_s_a = con_dic['test_tx_cache_s'].split(',')
+ test_rx_cache_s_a = con_dic['test_rx_cache_s'].split(',')
+ time_new = time.strftime('%H_%M',time.localtime(time.time()))
+ bandwidth_tmp = 1
+
+ for test_rx_cache_s_e in test_rx_cache_s_a:
+ for test_tx_cache_s_e in test_tx_cache_s_a:
+ for test_rx_pkt_s_e in test_rx_pkt_s_a:
+ print "%s,%s,%s"%(test_rx_pkt_s_e,test_tx_cache_s_e,test_rx_cache_s_e)
+ order_excute = os.popen("%s %s http://%s/api/v3/yardstick/tasks/task %s %s %s"%(cmd,order_arg,con_dic['test_ip'],test_rx_pkt_s_e,test_rx_cache_s_e,test_tx_cache_s_e))
+ order_result = order_excute.read()
+ task_id = order_result.find("task_id")
+ time.sleep(con_dic['test_time'])
+ cmd_excute = os.popen( "%s http://%s/api/v3/yardstick/testresults?task_id=%s"%(cmd,con_dic['test_ip'],task_id))
+ test_result = cmd_excute.read()
+ bandwidth = test_result.find("bandwidth")
+ cpu_load = test_result.find("cpu_load")
+ latency = test_result.find("latency")
+ posca_output_result(time_new,test_rx_pkt_s_e,test_rx_cache_s_e,test_tx_cache_s_e,bandwidth,latency,cpu_load)
+ if (abs(bandwidth-con_dic['test_throughput'])/con_dic['test_throughput'] >0.05) and (latency < con_dic['test_latency']) and (cpu_load < con_dic['test_cpu_load']):
+ if (abs(bandwidth_tmp-bandwidth)/bandwidth <0.05):
+ print "%s,%s,%s,%s,%s,%s"%(test_rx_pkt_s_e,test_rx_cache_s_e,test_tx_cache_s_e,bandwidth,latency,cpu_load)
+ return True
+ else:
+ bandwidth_tmp = bandwidth
+ else:
+ print "%s,%s,%s,%s,%s,%s"%(test_rx_pkt_s_e,test_rx_cache_s_e,test_tx_cache_s_e,bandwidth,latency,cpu_load)
+ return False
+
+
+def main():
+ if not (args.conf):
+ logger.error("Configuration files do not exist for the specified testcases")
+ exit(-1)
+ else:
+ testcase_cfg = args.conf
+
+ con_dic=posca_config_read(testcase_cfg)
+ posca_env_check()
+ posca_run(con_dic)
+
+ time.sleep(5)
+
+if __name__=='__main__':
+ main()
diff --git a/testsuites/rubbos/run_rubbos.py b/testsuites/rubbos/run_rubbos.py
index 455b3e58..63b9ae07 100755
--- a/testsuites/rubbos/run_rubbos.py
+++ b/testsuites/rubbos/run_rubbos.py
@@ -24,9 +24,11 @@ from novaclient.client import Client as NovaClient
# parser for configuration files in each test case
# ------------------------------------------------------
parser = argparse.ArgumentParser()
-parser.add_argument("-c", "--conf",
- help="configuration files for the testcase, in yaml format",
- default="/home/opnfv/bottlenecks/testsuites/rubbos/testcase_cfg/rubbos_basic.yaml")
+parser.add_argument(
+ "-c",
+ "--conf",
+ help="configuration files for the testcase, in yaml format",
+ default="/home/opnfv/bottlenecks/testsuites/rubbos/testcase_cfg/rubbos_basic.yaml")
args = parser.parse_args()
#--------------------------------------------------
@@ -37,31 +39,40 @@ logger = logging.getLogger(__name__)
def _get_keystone_client():
keystone_client = KeystoneClient(
- auth_url=os.environ.get('OS_AUTH_URL'),
- username=os.environ.get('OS_USERNAME'),
- password=os.environ.get('OS_PASSWORD'),
- tenant_name=os.environ.get('OS_TENANT_NAME'),
- cacert=os.environ.get('OS_CACERT'))
+ auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'),
+ cacert=os.environ.get('OS_CACERT'))
return keystone_client
+
def _get_heat_client():
keystone = _get_keystone_client()
- heat_endpoint = keystone.service_catalog.url_for(service_type='orchestration')
- heat_client = HeatClient('1', endpoint=heat_endpoint, token=keystone.auth_token)
+ heat_endpoint = keystone.service_catalog.url_for(
+ service_type='orchestration')
+ heat_client = HeatClient(
+ '1',
+ endpoint=heat_endpoint,
+ token=keystone.auth_token)
return heat_client
+
def _get_glance_client():
keystone = _get_keystone_client()
- glance_endpoint = keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
+ glance_endpoint = keystone.service_catalog.url_for(
+ service_type='image', endpoint_type='publicURL')
return GlanceClient(glance_endpoint, token=keystone.auth_token)
+
def _get_nova_client():
nova_client = NovaClient("2", os.environ.get('OS_USERNAME'),
- os.environ.get('OS_PASSWORD'),
- os.environ.get('OS_TENANT_NAME'),
- os.environ.get('OS_AUTH_URL'))
+ os.environ.get('OS_PASSWORD'),
+ os.environ.get('OS_TENANT_NAME'),
+ os.environ.get('OS_AUTH_URL'))
return nova_client
+
def _download_url(src_url, dest_dir):
''' Download a file to a destination path given a URL'''
file_name = src_url.rsplit('/')[-1]
@@ -76,21 +87,27 @@ def _download_url(src_url, dest_dir):
return dest
-def rubbos_stack_satisfy(name="bottlenecks_rubbos_stack", status="CREATE_COMPLETE"):
+def rubbos_stack_satisfy(
+ name="bottlenecks_rubbos_stack",
+ status="CREATE_COMPLETE"):
heat = _get_heat_client()
for stack in heat.stacks.list():
- if status == None and stack.stack_name == name:
+ if status is None and stack.stack_name == name:
# Found target stack
print "Found stack, name=" + str(stack.stack_name)
return True
- elif stack.stack_name == name and stack.stack_status==status:
+ elif stack.stack_name == name and stack.stack_status == status:
print "Found stack, name=" + str(stack.stack_name) + ", status=" + str(stack.stack_status)
return True
return False
+
def rubbos_env_prepare(template=None):
print "========== Prepare rubbos environment =========="
- logger.info("Generate heat template for the testcase based on template '%s'." % template)
+ logger.info(
+ "Generate heat template for the testcase based on template '%s'." %
+ template)
+
def rubbos_env_cleanup():
print "========== Cleanup rubbos environment =========="
@@ -119,25 +136,33 @@ def rubbos_env_cleanup():
heat.stacks.delete(stack.id)
timeInProgress = 0
- while rubbos_stack_satisfy(name="bottlenecks_rubbos_stack", status=None) and timeInProgress < 60:
+ while rubbos_stack_satisfy(
+ name="bottlenecks_rubbos_stack",
+ status=None) and timeInProgress < 60:
time.sleep(5)
timeInProgress = timeInProgress + 5
- if rubbos_stack_satisfy(name="bottlenecks_rubbos_stack", status=None) == True:
+ if rubbos_stack_satisfy(name="bottlenecks_rubbos_stack", status=None):
print "Failed to clean the stack"
return False
else:
return True
-def rubbos_create_images(imagefile=None, image_name="bottlenecks_rubbos_image"):
+
+def rubbos_create_images(
+ imagefile=None,
+ image_name="bottlenecks_rubbos_image"):
print "========== Create rubbos image in OS =========="
- if imagefile == None:
- print "imagefile not set/found"
- return False
+ if imagefile is None:
+ print "imagefile not set/found"
+ return False
glance = _get_glance_client()
- image = glance.images.create(name=image_name, disk_format="qcow2", container_format="bare")
+ image = glance.images.create(
+ name=image_name,
+ disk_format="qcow2",
+ container_format="bare")
with open(imagefile) as fimage:
glance.images.upload(image.id, fimage)
@@ -149,50 +174,65 @@ def rubbos_create_images(imagefile=None, image_name="bottlenecks_rubbos_image"):
timeInQueue = timeInQueue + 1
img_status = glance.images.get(image.id).status
- print "After %d seconds, the image's status is [%s]" %(timeInQueue, img_status)
+ print "After %d seconds, the image's status is [%s]" % (timeInQueue, img_status)
return True if img_status == "active" else False
+
def rubbos_create_keypairs(key_path, name="bottlenecks_rubbos_keypair"):
print "========== Add rubbos keypairs in OS =========="
nova = _get_nova_client()
with open(key_path) as pkey:
nova.keypairs.create(name=name, public_key=pkey.read())
-def rubbos_create_flavors(name="bottlenecks_rubbos_flavor", ram=4096, vcpus=2, disk=10):
+
+def rubbos_create_flavors(
+ name="bottlenecks_rubbos_flavor",
+ ram=4096,
+ vcpus=2,
+ disk=10):
print "========== Create rubbos flavors in OS =========="
nova = _get_nova_client()
nova.flavors.create(name=name, ram=ram, vcpus=vcpus, disk=disk)
-def rubbos_create_instances(template_file, rubbos_parameters=None, stack_name="bottlenecks_rubbos_stack"):
+
+def rubbos_create_instances(
+ template_file,
+ rubbos_parameters=None,
+ stack_name="bottlenecks_rubbos_stack"):
print "========== Create rubbos instances =========="
heat = _get_heat_client()
with open(template_file) as template:
- stack = heat.stacks.create(stack_name=stack_name, template=template.read(), parameters=rubbos_parameters)
+ stack = heat.stacks.create(
+ stack_name=stack_name,
+ template=template.read(),
+ parameters=rubbos_parameters)
stack_id = stack['stack']['id']
stack_status = heat.stacks.get(stack_id).stack_status
print "Created stack, id=" + str(stack_id) + ", status=" + str(stack_status)
- timeInProgress= 0
+ timeInProgress = 0
while stack_status == "CREATE_IN_PROGRESS" and timeInProgress < 150:
- print " stack's status: %s, after %d seconds" %(stack_status, timeInProgress)
+ print " stack's status: %s, after %d seconds" % (stack_status, timeInProgress)
time.sleep(5)
timeInProgress = timeInProgress + 5
stack_status = heat.stacks.get(stack_id).stack_status
- print "After %d seconds, the stack's status is [%s]" %(timeInProgress, stack_status)
+ print "After %d seconds, the stack's status is [%s]" % (timeInProgress, stack_status)
return True if stack_status == "CREATE_COMPLETE" else False
+
def get_instances(nova_client):
try:
instances = nova_client.servers.list(search_opts={'all_tenants': 1})
return instances
- except Exception, e:
+ except Exception as e:
print "Error [get_instances(nova_client)]:", e
return None
+
def reboot_instances():
print("========== reboot instances ==========")
nova = _get_nova_client()
@@ -205,12 +245,13 @@ def reboot_instances():
instance.reboot()
print("Finish reboot all rubbos servers.")
+
def rubbos_run():
print "========== run rubbos ==========="
nova = _get_nova_client()
instances = get_instances(nova)
- if instances == None:
+ if instances is None:
print "Found *None* instances, exit rubbos_run()!"
return False
@@ -223,57 +264,83 @@ def rubbos_run():
database_servers = ""
for instance in instances:
name = getattr(instance, 'name')
- private_ip = [ x['addr'] for x in getattr(instance, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'fixed']
- public_ip = [ x['addr'] for x in getattr(instance, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'floating']
+ private_ip = [
+ x['addr'] for x in getattr(
+ instance,
+ 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'fixed']
+ public_ip = [
+ x['addr'] for x in getattr(
+ instance,
+ 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'floating']
if name.find("rubbos-control") >= 0:
control_public_ip = public_ip[0]
- control_server = str(name) + ':' + public_ip[0] + ':' + private_ip[0]
+ control_server = str(name) + ':' + \
+ public_ip[0] + ':' + private_ip[0]
if name.find("rubbos-client") >= 0:
- client_servers = client_servers + str(name)+':'+private_ip[0] + ","
+ client_servers = client_servers + \
+ str(name) + ':' + private_ip[0] + ","
if name.find("rubbos-httpd") >= 0:
- web_servers = web_servers + str(name)+':'+private_ip[0] + ","
+ web_servers = web_servers + str(name) + ':' + private_ip[0] + ","
if name.find("rubbos-tomcat") >= 0:
app_servers = app_servers + str(name) + ':' + private_ip[0] + ","
if name.find("rubbos-cjdbc") >= 0:
cjdbc_controller = str(name) + ':' + private_ip[0]
if name.find("rubbos-mysql") >= 0:
- database_servers = database_servers + str(name) + ':' + private_ip[0] + ","
+ database_servers = database_servers + \
+ str(name) + ':' + private_ip[0] + ","
- client_servers = client_servers[0:len(client_servers)-1]
- web_servers = web_servers[0:len(web_servers)-1]
- app_servers = app_servers[0:len(app_servers)-1]
- database_servers = database_servers[0:len(database_servers)-1]
+ client_servers = client_servers[0:len(client_servers) - 1]
+ web_servers = web_servers[0:len(web_servers) - 1]
+ app_servers = app_servers[0:len(app_servers) - 1]
+ database_servers = database_servers[0:len(database_servers) - 1]
print "control_server: %s" % control_server
print "client_servers: %s" % client_servers
print "web_servers: %s" % web_servers
print "app_servers: %s" % app_servers
print "cjdbc_controller: %s" % cjdbc_controller
print "database_servers: %s" % database_servers
- with open(Bottlenecks_repo_dir+"/testsuites/rubbos/puppet_manifests/internal/rubbos.conf") as temp_f, open('rubbos.conf', 'w') as new_f:
+ with open(Bottlenecks_repo_dir + "/testsuites/rubbos/puppet_manifests/internal/rubbos.conf") as temp_f, open('rubbos.conf', 'w') as new_f:
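+ # Build a local rubbos.conf from the template by substituting the REPLACED_*
+ # placeholders with the server name:IP strings discovered above.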
for line in temp_f.readlines():
- if line.find("REPLACED_CONTROLLER") >= 0 :
- new_f.write( line.replace("REPLACED_CONTROLLER", control_server) )
+ if line.find("REPLACED_CONTROLLER") >= 0:
+ new_f.write(
+ line.replace(
+ "REPLACED_CONTROLLER",
+ control_server))
elif line.find("REPLACED_CLIENT_SERVERS") >= 0:
- new_f.write( line.replace("REPLACED_CLIENT_SERVERS", client_servers) )
+ new_f.write(
+ line.replace(
+ "REPLACED_CLIENT_SERVERS",
+ client_servers))
elif line.find("REPLACED_WEB_SERVERS") >= 0:
- new_f.write( line.replace("REPLACED_WEB_SERVERS", web_servers) )
+ new_f.write(line.replace("REPLACED_WEB_SERVERS", web_servers))
elif line.find("REPLACED_APP_SERVERS") >= 0:
- new_f.write( line.replace("REPLACED_APP_SERVERS", app_servers) )
+ new_f.write(line.replace("REPLACED_APP_SERVERS", app_servers))
elif line.find("REPLACED_CJDBC_CONTROLLER") >= 0:
- new_f.write( line.replace("REPLACED_CJDBC_CONTROLLER", cjdbc_controller) )
+ new_f.write(
+ line.replace(
+ "REPLACED_CJDBC_CONTROLLER",
+ cjdbc_controller))
elif line.find("REPLACED_DB_SERVERS") >= 0:
- new_f.write( line.replace("REPLACED_DB_SERVERS", database_servers) )
+ new_f.write(
+ line.replace(
+ "REPLACED_DB_SERVERS",
+ database_servers))
elif line.find("REPLACED_CLIENTS_PER_NODE") >= 0:
- new_f.write( line.replace("REPLACED_CLIENTS_PER_NODE", "200 400 800 1600 3200") )
+ new_f.write(
+ line.replace(
+ "REPLACED_CLIENTS_PER_NODE",
+ "200 400 800 1600 3200"))
else:
new_f.write(line)
if os.path.exists("rubbos.conf") == False:
return False
- cmd = "sudo chmod 0600 " + Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key"
+ cmd = "sudo chmod 0600 " + Bottlenecks_repo_dir + \
+ "/utils/infra_setup/bottlenecks_key/bottlenecks_key"
subprocess.call(cmd, shell=True)
- ssh_args = "-o StrictHostKeyChecking=no -o BatchMode=yes -i " + Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key "
+ ssh_args = "-o StrictHostKeyChecking=no -o BatchMode=yes -i " + \
+ Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key "
print "############### Test #################"
cmd = 'ssh-keygen -f "/root/.ssh/known_hosts" -R ' + control_public_ip
@@ -292,19 +359,23 @@ def rubbos_run():
subprocess.call("nova list", shell=True)
print "############### Test #################"
- cmd = "scp " + ssh_args + "rubbos.conf ubuntu@" + control_public_ip + ":/home/ubuntu/"
+ cmd = "scp " + ssh_args + "rubbos.conf ubuntu@" + \
+ control_public_ip + ":/home/ubuntu/"
print "Exec shell: " + cmd
subprocess.call(cmd, shell=True)
- cmd = "scp " + ssh_args + Bottlenecks_repo_dir + "/testsuites/rubbos/puppet_manifests/internal/run_rubbos_internal.sh ubuntu@" + control_public_ip + ":/home/ubuntu/"
+ cmd = "scp " + ssh_args + Bottlenecks_repo_dir + \
+ "/testsuites/rubbos/puppet_manifests/internal/run_rubbos_internal.sh ubuntu@" + control_public_ip + ":/home/ubuntu/"
print "Exec shell: " + cmd
subprocess.call(cmd, shell=True)
# call remote run_rubbos_internal.sh
- cmd = "ssh " + ssh_args + " ubuntu@" + control_public_ip + ' "sudo /home/ubuntu/run_rubbos_internal.sh /home/ubuntu/rubbos.conf /home/ubuntu/btnks-results" '
+ cmd = "ssh " + ssh_args + " ubuntu@" + control_public_ip + \
+ ' "sudo /home/ubuntu/run_rubbos_internal.sh /home/ubuntu/rubbos.conf /home/ubuntu/btnks-results" '
print "Exec shell: " + cmd
subprocess.call(cmd, shell=True)
- cmd = "scp " + ssh_args + " ubuntu@" + control_public_ip + ":/home/ubuntu/btnks-results/rubbos.out ./rubbos.out"
+ cmd = "scp " + ssh_args + " ubuntu@" + control_public_ip + \
+ ":/home/ubuntu/btnks-results/rubbos.out ./rubbos.out"
print "Exec shell: " + cmd
subprocess.call(cmd, shell=True)
if os.path.exists("rubbos.out") == False:
@@ -318,36 +389,40 @@ def rubbos_run():
print line
return True
+
def main():
global Heat_template
global Bottlenecks_repo_dir
global image_url
- Bottlenecks_repo_dir = "/home/opnfv/bottlenecks" # same in Dockerfile, docker directory
+ # same in Dockerfile, docker directory
+ Bottlenecks_repo_dir = "/home/opnfv/bottlenecks"
image_url = 'http://artifacts.opnfv.org/bottlenecks/rubbos/trusty-server-cloudimg-amd64-btnks.img'
#image_url = 'http://artifacts.opnfv.org/bottlenecks/rubbos/bottlenecks-trusty-server.img'
if not (args.conf):
- logger.error("Configuration files are not set for testcase")
- exit(-1)
+ logger.error("Configuration files are not set for testcase")
+ exit(-1)
else:
- Heat_template = args.conf
-
- master_user_data=""
- agent_user_data=""
- with open(Bottlenecks_repo_dir+"/utils/infra_setup/user_data/p-master-user-data") as f:
- master_user_data=f.read()
- master_user_data = master_user_data.replace('REPLACED_PUPPET_MASTER_SERVER','rubbos-control')
- with open(Bottlenecks_repo_dir+"/utils/infra_setup/user_data/p-agent-user-data") as f:
- agent_user_data=f.read()
- agent_user_data = agent_user_data.replace('REPLACED_PUPPET_MASTER_SERVER','rubbos-control')
-
- parameters={'image': 'bottlenecks_rubbos_image',
- 'key_name': 'bottlenecks_rubbos_keypair',
- 'flavor': 'bottlenecks_rubbos_flavor',
- 'public_net': os.environ.get('EXTERNAL_NET'),
- 'master_user_data': master_user_data,
- 'agent_user_data': agent_user_data }
+ Heat_template = args.conf
+
+ master_user_data = ""
+ agent_user_data = ""
+ with open(Bottlenecks_repo_dir + "/utils/infra_setup/user_data/p-master-user-data") as f:
+ master_user_data = f.read()
+ master_user_data = master_user_data.replace(
+ 'REPLACED_PUPPET_MASTER_SERVER', 'rubbos-control')
+ with open(Bottlenecks_repo_dir + "/utils/infra_setup/user_data/p-agent-user-data") as f:
+ agent_user_data = f.read()
+ agent_user_data = agent_user_data.replace(
+ 'REPLACED_PUPPET_MASTER_SERVER', 'rubbos-control')
+
+ parameters = {'image': 'bottlenecks_rubbos_image',
+ 'key_name': 'bottlenecks_rubbos_keypair',
+ 'flavor': 'bottlenecks_rubbos_flavor',
+ 'public_net': os.environ.get('EXTERNAL_NET'),
+ 'master_user_data': master_user_data,
+ 'agent_user_data': agent_user_data}
print "Heat_template_file: " + Heat_template
print "parameters:\n" + str(parameters)
@@ -360,31 +435,35 @@ def main():
dest_dir = "/tmp"
image_file = _download_url(image_url, dest_dir)
- if image_file == None:
- print "error with downloading image(s)"
- exit(-1)
+ if image_file is None:
+ print "error with downloading image(s)"
+ exit(-1)
image_created = rubbos_create_images(imagefile=image_file)
- keyPath = Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key.pub"
+ keyPath = Bottlenecks_repo_dir + \
+ "/utils/infra_setup/bottlenecks_key/bottlenecks_key.pub"
rubbos_create_keypairs(key_path=keyPath)
rubbos_create_flavors()
- if image_created == True:
- stack_created = rubbos_create_instances(template_file=Heat_template, rubbos_parameters=parameters, stack_name="bottlenecks_rubbos_stack")
+ if image_created:
+ stack_created = rubbos_create_instances(
+ template_file=Heat_template,
+ rubbos_parameters=parameters,
+ stack_name="bottlenecks_rubbos_stack")
else:
print "Cannot create instances, as Failed to create image(s)."
- exit (-1)
+ exit(-1)
print "Wait 600 seconds after stack creation..."
time.sleep(600)
- #reboot_instances()
- #time.sleep(180)
+ # reboot_instances()
+ # time.sleep(180)
rubbos_run()
time.sleep(30)
rubbos_env_cleanup()
-if __name__=='__main__':
+if __name__ == '__main__':
main()
diff --git a/testsuites/vstf/run_vstf.py b/testsuites/vstf/run_vstf.py
index 1aed7596..f0018e7a 100755
--- a/testsuites/vstf/run_vstf.py
+++ b/testsuites/vstf/run_vstf.py
@@ -24,9 +24,11 @@ from novaclient.client import Client as NovaClient
# parser for configuration files in each test case
# ------------------------------------------------------
parser = argparse.ArgumentParser()
-parser.add_argument("-c", "--conf",
- help="configuration files for the testcase, in yaml format",
- default="/home/opnfv/bottlenecks/testsuites/vstf/testcase_cfg/vstf_Tu1.yaml")
+parser.add_argument(
+ "-c",
+ "--conf",
+ help="configuration files for the testcase, in yaml format",
+ default="/home/opnfv/bottlenecks/testsuites/vstf/testcase_cfg/vstf_Tu1.yaml")
args = parser.parse_args()
#--------------------------------------------------
@@ -37,31 +39,40 @@ logger = logging.getLogger(__name__)
def _get_keystone_client():
keystone_client = KeystoneClient(
- auth_url=os.environ.get('OS_AUTH_URL'),
- username=os.environ.get('OS_USERNAME'),
- password=os.environ.get('OS_PASSWORD'),
- tenant_name=os.environ.get('OS_TENANT_NAME'),
- cacert=os.environ.get('OS_CACERT'))
+ auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'),
+ cacert=os.environ.get('OS_CACERT'))
return keystone_client
+
def _get_heat_client():
keystone = _get_keystone_client()
- heat_endpoint = keystone.service_catalog.url_for(service_type='orchestration')
- heat_client = HeatClient('1', endpoint=heat_endpoint, token=keystone.auth_token)
+ heat_endpoint = keystone.service_catalog.url_for(
+ service_type='orchestration')
+ heat_client = HeatClient(
+ '1',
+ endpoint=heat_endpoint,
+ token=keystone.auth_token)
return heat_client
+
def _get_glance_client():
keystone = _get_keystone_client()
- glance_endpoint = keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
+ glance_endpoint = keystone.service_catalog.url_for(
+ service_type='image', endpoint_type='publicURL')
return GlanceClient(glance_endpoint, token=keystone.auth_token)
+
def _get_nova_client():
nova_client = NovaClient("2", os.environ.get('OS_USERNAME'),
- os.environ.get('OS_PASSWORD'),
- os.environ.get('OS_TENANT_NAME'),
- os.environ.get('OS_AUTH_URL'))
+ os.environ.get('OS_PASSWORD'),
+ os.environ.get('OS_TENANT_NAME'),
+ os.environ.get('OS_AUTH_URL'))
return nova_client
+
def _download_url(src_url, dest_dir):
''' Download a file to a destination path given a URL'''
file_name = src_url.rsplit('/')[-1]
@@ -75,22 +86,27 @@ def _download_url(src_url, dest_dir):
shutil.copyfileobj(response, f)
return dest
-def vstf_stack_satisfy(name="bottlenecks_vstf_stack", status="CREATE_COMPLETE"):
+
+def vstf_stack_satisfy(
+ name="bottlenecks_vstf_stack",
+ status="CREATE_COMPLETE"):
heat = _get_heat_client()
for stack in heat.stacks.list():
- if status == None and stack.stack_name == name:
+ if status is None and stack.stack_name == name:
# Found target stack
print "Found stack, name=" + str(stack.stack_name)
return True
- elif stack.stack_name == name and stack.stack_status==status:
+ elif stack.stack_name == name and stack.stack_status == status:
print "Found stack, name=" + str(stack.stack_name) + ", status=" + str(stack.stack_status)
return True
return False
+
def vstf_env_prepare(template=None):
print "========== Prepare vstf environment =========="
logger.info("env preparation for testcase.")
+
def vstf_env_cleanup():
print "========== Cleanup vstf environment =========="
glance = _get_glance_client()
@@ -118,25 +134,31 @@ def vstf_env_cleanup():
heat.stacks.delete(stack.id)
timeInProgress = 0
- while vstf_stack_satisfy(name="bottlenecks_vstf_stack", status=None) and timeInProgress < 60:
+ while vstf_stack_satisfy(
+ name="bottlenecks_vstf_stack",
+ status=None) and timeInProgress < 60:
time.sleep(5)
timeInProgress = timeInProgress + 5
- if vstf_stack_satisfy(name="bottlenecks_vstf_stack", status=None) == True:
+ if vstf_stack_satisfy(name="bottlenecks_vstf_stack", status=None):
print "Failed to clean the stack"
return False
else:
return True
+
def vstf_create_images(imagefile=None, image_name="bottlenecks_vstf_image"):
print "========== Create vstf image in OS =========="
- if imagefile == None:
- print "imagefile not set/found"
- return False
+ if imagefile is None:
+ print "imagefile not set/found"
+ return False
glance = _get_glance_client()
- image = glance.images.create(name=image_name, disk_format="qcow2", container_format="bare")
+ image = glance.images.create(
+ name=image_name,
+ disk_format="qcow2",
+ container_format="bare")
with open(imagefile) as fimage:
glance.images.upload(image.id, fimage)
@@ -148,50 +170,65 @@ def vstf_create_images(imagefile=None, image_name="bottlenecks_vstf_image"):
timeInQueue = timeInQueue + 1
img_status = glance.images.get(image.id).status
- print "After %d seconds, the image's status is [%s]" %(timeInQueue, img_status)
+ print "After %d seconds, the image's status is [%s]" % (timeInQueue, img_status)
return True if img_status == "active" else False
+
def vstf_create_keypairs(key_path, name="bottlenecks_vstf_keypair"):
print "========== Add vstf keypairs in OS =========="
nova = _get_nova_client()
with open(key_path) as pkey:
nova.keypairs.create(name=name, public_key=pkey.read())
-def vstf_create_flavors(name="bottlenecks_vstf_flavor", ram=4096, vcpus=2, disk=10):
+
+def vstf_create_flavors(
+ name="bottlenecks_vstf_flavor",
+ ram=4096,
+ vcpus=2,
+ disk=10):
print "========== Create vstf flavors in OS =========="
nova = _get_nova_client()
nova.flavors.create(name=name, ram=ram, vcpus=vcpus, disk=disk)
-def vstf_create_instances(template_file, vstf_parameters=None, stack_name="bottlenecks_vstf_stack"):
+
+def vstf_create_instances(
+ template_file,
+ vstf_parameters=None,
+ stack_name="bottlenecks_vstf_stack"):
print "========== Create vstf instances =========="
heat = _get_heat_client()
with open(template_file) as template:
- stack = heat.stacks.create(stack_name=stack_name, template=template.read(), parameters=vstf_parameters)
+ stack = heat.stacks.create(
+ stack_name=stack_name,
+ template=template.read(),
+ parameters=vstf_parameters)
stack_id = stack['stack']['id']
stack_status = heat.stacks.get(stack_id).stack_status
print "Created stack, id=" + str(stack_id) + ", status=" + str(stack_status)
- timeInProgress= 0
+ timeInProgress = 0
while stack_status == "CREATE_IN_PROGRESS" and timeInProgress < 150:
- print " stack's status: %s, after %d seconds" %(stack_status, timeInProgress)
+ print " stack's status: %s, after %d seconds" % (stack_status, timeInProgress)
time.sleep(5)
timeInProgress = timeInProgress + 5
stack_status = heat.stacks.get(stack_id).stack_status
- print "After %d seconds, the stack's status is [%s]" %(timeInProgress, stack_status)
+ print "After %d seconds, the stack's status is [%s]" % (timeInProgress, stack_status)
return True if stack_status == "CREATE_COMPLETE" else False
+
def get_instances(nova_client):
try:
instances = nova_client.servers.list(search_opts={'all_tenants': 1})
return instances
- except Exception, e:
+ except Exception as e:
print "Error [get_instances(nova_client)]:", e
return None
+
def vstf_run(launch_file=None, test_file=None):
print "================run vstf==============="
@@ -204,12 +241,12 @@ def vstf_run(launch_file=None, test_file=None):
subprocess.call("nova list", shell=True)
time.sleep(100)
instances = get_instances(nova)
- if instances == None:
+ if instances is None:
print "Found *None* instances, exit vstf_run()!"
return False
- if launch_file == None or test_file == None:
- print "Error, vstf launch/test file not given"
- return False
+ if launch_file is None or test_file is None:
+ print "Error, vstf launch/test file not given"
+ return False
cmd = "bash " + launch_file
subprocess.call(cmd, shell=True)
time.sleep(50)
@@ -217,66 +254,80 @@ def vstf_run(launch_file=None, test_file=None):
subprocess.call(cmd, shell=True)
time.sleep(20)
+
def main():
- Bottlenecks_repo_dir = "/home/opnfv/bottlenecks" # same in Dockerfile, docker directory
- Heat_template = Bottlenecks_repo_dir + "/testsuites/vstf/testcase_cfg/vstf_heat_template.yaml"
+ # same in Dockerfile, docker directory
+ Bottlenecks_repo_dir = "/home/opnfv/bottlenecks"
+ Heat_template = Bottlenecks_repo_dir + \
+ "/testsuites/vstf/testcase_cfg/vstf_heat_template.yaml"
manager_image_url = 'http://artifacts.opnfv.org/bottlenecks/vstf-manager-new.img'
agent_image_url = 'http://artifacts.opnfv.org/bottlenecks/vstf-agent-new.img'
- #vstf_env_prepare(testcase_cfg)
+ # vstf_env_prepare(testcase_cfg)
vstf_env_cleanup()
dest_dir = "/tmp"
manager_file = _download_url(manager_image_url, dest_dir)
- if manager_file == None:
- print "error with downloading image(s)"
- exit(-1)
+ if manager_file is None:
+ print "error with downloading image(s)"
+ exit(-1)
agent_file = _download_url(agent_image_url, dest_dir)
- if agent_file == None:
- print "error with downloading image(s)"
- exit(-1)
+ if agent_file is None:
+ print "error with downloading image(s)"
+ exit(-1)
- #TO DO:the parameters are all used defaults here, it should be changed depends on what it is really named
- parameters={'key_name': 'bottlenecks_vstf_keypair',
- 'flavor': 'bottlenecks_vstf_flavor',
- 'public_net': os.environ.get('EXTERNAL_NET')}
+ # TODO: the parameters all use defaults here; they should be changed
+ # depending on what the resources are really named
+ parameters = {'key_name': 'bottlenecks_vstf_keypair',
+ 'flavor': 'bottlenecks_vstf_flavor',
+ 'public_net': os.environ.get('EXTERNAL_NET')}
print "Heat_template_file: " + Heat_template
print "parameters:\n" + str(parameters)
if not (args.conf):
- logger.error("Configuration files are not set for testcase")
- exit(-1)
+ logger.error("Configuration files are not set for testcase")
+ exit(-1)
else:
- testcase_cfg = args.conf
+ testcase_cfg = args.conf
manager_image_created = False
tester_image_created = False
target_image_created = False
stack_created = False
- manager_image_created = vstf_create_images(imagefile=manager_file, image_name="bottlenecks_vstf_manager")
- tester_image_created = vstf_create_images(imagefile=agent_file, image_name="bottlenecks_vstf_tester")
- target_image_created = vstf_create_images(imagefile=agent_file, image_name="bottlenecks_vstf_target")
- keyPath = Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key.pub"
+ manager_image_created = vstf_create_images(
+ imagefile=manager_file,
+ image_name="bottlenecks_vstf_manager")
+ tester_image_created = vstf_create_images(
+ imagefile=agent_file, image_name="bottlenecks_vstf_tester")
+ target_image_created = vstf_create_images(
+ imagefile=agent_file, image_name="bottlenecks_vstf_target")
+ keyPath = Bottlenecks_repo_dir + \
+ "/utils/infra_setup/bottlenecks_key/bottlenecks_key.pub"
vstf_create_keypairs(key_path=keyPath)
vstf_create_flavors()
- if manager_image_created == True and tester_image_created == True and target_image_created == True:
- stack_created = vstf_create_instances(template_file=Heat_template, vstf_parameters=parameters, stack_name="bottlenecks_vstf_stack")
+ if manager_image_created and tester_image_created and target_image_created:
+ stack_created = vstf_create_instances(
+ template_file=Heat_template,
+ vstf_parameters=parameters,
+ stack_name="bottlenecks_vstf_stack")
else:
print "Cannot create instances, as Failed to create image(s)."
- exit (-1)
+ exit(-1)
print "Wait 300 seconds after stack creation..."
time.sleep(300)
- launchfile = Bottlenecks_repo_dir + "/utils/infra_setup/heat_template/vstf_heat_template/launch_vstf.sh"
- testfile = Bottlenecks_repo_dir + "/utils/infra_setup/heat_template/vstf_heat_template/vstf_test.sh"
+ launchfile = Bottlenecks_repo_dir + \
+ "/utils/infra_setup/heat_template/vstf_heat_template/launch_vstf.sh"
+ testfile = Bottlenecks_repo_dir + \
+ "/utils/infra_setup/heat_template/vstf_heat_template/vstf_test.sh"
vstf_run(launch_file=launchfile, test_file=testfile)
vstf_env_cleanup()
-if __name__=='__main__':
+if __name__ == '__main__':
main()
diff --git a/testsuites/vstf/vstf_collector.py b/testsuites/vstf/vstf_collector.py
index 7206e32d..af11bc66 100755
--- a/testsuites/vstf/vstf_collector.py
+++ b/testsuites/vstf/vstf_collector.py
@@ -14,7 +14,9 @@ import logging
LOG = logging.getLogger(__name__)
+
class Uploader(object):
+
def __init__(self, conf):
self.headers = {'Content-type': 'application/json'}
self.timeout = 5
@@ -36,12 +38,18 @@ class Uploader(object):
self.result["case_name"] = case_name
self.result["details"] = raw_data
try:
- LOG.debug('Result to be uploaded:\n %s' % json.dumps(self.result, indent=4))
+ LOG.debug(
+ 'Result to be uploaded:\n %s' %
+ json.dumps(
+ self.result,
+ indent=4))
res = requests.post(self.target,
data=json.dumps(self.result),
headers=self.headers,
timeout=self.timeout)
- print('Test result posting finished with status code %d.' % res.status_code)
+ print(
+ 'Test result posting finished with status code %d.' %
+ res.status_code)
except Exception as err:
LOG.error('Failed to record result data: %s', err)
@@ -49,8 +57,14 @@ class Uploader(object):
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
- parser.add_argument('--config', required=True, help="basic config file for uploader, json format.")
- parser.add_argument('--dir', required=True, help="result files for test cases")
+ parser.add_argument(
+ '--config',
+ required=True,
+ help="basic config file for uploader, json format.")
+ parser.add_argument(
+ '--dir',
+ required=True,
+ help="result files for test cases")
args = parser.parse_args()
realpath = os.path.realpath(args.dir)
for filename in os.listdir(args.dir):
@@ -58,4 +72,9 @@ if __name__ == "__main__":
LOG.debug("uploading test result from file:%s", filepath)
with open(filepath) as stream:
result = eval(stream.read())
- Uploader(args.config).upload_result(filename.lower().replace('-', ''), result)
+ Uploader(
+ args.config).upload_result(
+ filename.lower().replace(
+ '-',
+ ''),
+ result)
diff --git a/testsuites/vstf/vstf_scripts/vstf/__init__.py b/testsuites/vstf/vstf_scripts/vstf/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/agent.py b/testsuites/vstf/vstf_scripts/vstf/agent/agent.py
index b5745995..4d92c510 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/agent.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/agent.py
@@ -40,17 +40,17 @@ stc_opts = [
class Client(daemon.Daemon):
"""This is a consumer of vstf-agent which will create two channel to the
rabbitmq-server, one for direct call, one for fan call.
-
+
agent start with a config file which record rabbitmq's ip, port and user passwd
also each agent has its own id.
-
+
"""
def __init__(self, agent, config_file):
"""Record the config file, init the daemon.
-
+
:param str config_file: the config of a VSTF agent.
-
+
"""
super(Client, self).__init__('/tmp/esp_rpc_client.pid')
self.config_file = config_file
@@ -61,7 +61,7 @@ class Client(daemon.Daemon):
def init_config(self):
"""Use olso.config to analyse the config file
-
+
"""
parser = CfgParser(self.config_file)
parser.register_my_opts(server_opts, "rabbit")
@@ -80,7 +80,7 @@ class Client(daemon.Daemon):
def run(self):
"""Run the rabbitmq consumers as a daemon.
-
+
"""
signal.signal(signal.SIGTERM, self.process_exit)
self.loop_thread()
@@ -90,7 +90,7 @@ class Client(daemon.Daemon):
"""This function try to stop the agent after running agent stop.
When we call vstf-agent stop which will send a signal SIGTERM to agent
When the agent catch the SIGTERM signal will call this function.
-
+
"""
LOG.info("daemon catch the signalterm, start to stop the process.")
self.run_flag = False
@@ -104,7 +104,7 @@ class Client(daemon.Daemon):
def stop_agent(self):
"""Notice that: this function just kill the agent by pid file, it has
none vars of the agent.
-
+
"""
LOG.info("call daemon stop.")
# kill the main thread
@@ -120,9 +120,11 @@ def main():
default="soft",
choices=["soft", "spirent"],
help="the agent type, as now, just soft and spirent")
- parser.add_argument('--config_file', action='store',
- default="/etc/vstf/amqp/amqp.ini",
- help="some env_build params recorded in the config file")
+ parser.add_argument(
+ '--config_file',
+ action='store',
+ default="/etc/vstf/amqp/amqp.ini",
+ help="some env_build params recorded in the config file")
args = parser.parse_args()
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/__init__.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/__init__.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/collect.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/collect.py
index 126a7d55..1d39d7b7 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/collect.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/collect.py
@@ -31,11 +31,11 @@ class Collect(object):
"""the base _system info
{'os info':{'_system':'ubuntu', 'kernel': '3.13.3'}}"""
return {const.OS_INFO:
- {
- '_system': open('/etc/issue.net').readline().strip(),
- 'kernel': platform.uname()[2]
- }
- }
+ {
+ '_system': open('/etc/issue.net').readline().strip(),
+ 'kernel': platform.uname()[2]
+ }
+ }
def _memery(self):
""" Return the information in /proc/meminfo
@@ -46,11 +46,11 @@ class Collect(object):
meminfo[line.split(':')[0]] = line.split(':')[1].strip()
return {const.MEMORY_INFO:
- {
- "Mem Total": meminfo['MemTotal'],
- "Mem Swap": meminfo['SwapTotal']
- }
- }
+ {
+ "Mem Total": meminfo['MemTotal'],
+ "Mem Swap": meminfo['SwapTotal']
+ }
+ }
def _lscpu(self):
ret = {}
@@ -68,18 +68,19 @@ class Collect(object):
ret.append(cpuinfo)
cpuinfo = OrderedDict()
elif len(line.split(':')) == 2:
- cpuinfo[line.split(':')[0].strip()] = line.split(':')[1].strip()
+ cpuinfo[line.split(':')[0].strip()] = line.split(':')[
+ 1].strip()
else:
log.error("_cpu info unknow format <%(c)s>", {'c': line})
return {const.CPU_INFO:
- dict(
- {
- "Model Name": ret[0]['model name'],
- "Address sizes": ret[0]['address sizes']
- },
- **(self._lscpu())
- )
- }
+ dict(
+ {
+ "Model Name": ret[0]['model name'],
+ "Address sizes": ret[0]['address sizes']
+ },
+ **(self._lscpu())
+ )
+ }
def _hw_sysinfo(self):
cmdline = "dmidecode | grep -A 2 'System Information' | grep -v 'System Information'"
@@ -90,14 +91,15 @@ class Collect(object):
for tmp in output.strip().split('\n'):
if tmp is None or tmp is "":
continue
- # split the items
+ # split the items
tmp = tmp.split(":")
if len(tmp) >= 2:
# first item as key, and the other as value
result[tmp[0].strip("\t")] = ";".join(tmp[1:])
return {const.HW_INFO: result}
else:
- return {const.HW_INFO: "get hw info failed. check the host by cmd: dmidecode"}
+ return {
+ const.HW_INFO: "get hw info failed. check the host by cmd: dmidecode"}
def collect_host_info(self):
return [self._system, self._cpu, self._memery(), self._hw_sysinfo()]
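
The reindented return statements above change layout only; the data still comes from a line-by-line parse of /proc/meminfo keyed on the text before the colon. A stand-alone sketch of that technique (illustration only, not the Collect API):

def read_meminfo(path='/proc/meminfo'):
    """Parse /proc/meminfo into a dict such as {'MemTotal': '16336520 kB', ...}."""
    info = {}
    with open(path) as f:
        for line in f:
            if ':' in line:
                key, value = line.split(':', 1)
                info[key.strip()] = value.strip()
    return info

if __name__ == '__main__':
    mem = read_meminfo()
    print({'Mem Total': mem.get('MemTotal'), 'Mem Swap': mem.get('SwapTotal')})
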
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/commandline.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/commandline.py
index e4df9b27..29dd2c02 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/commandline.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/commandline.py
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
class CommandLine(object):
+
def __init__(self):
super(CommandLine, self).__init__()
self.proc = None
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/device_manager.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/device_manager.py
index 8b5387fe..c34f5e06 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/device_manager.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/device_manager.py
@@ -21,6 +21,7 @@ default_drivers = {
class LspciHelper(object):
+
def __init__(self):
self.bdf_desc_map = {}
self.bdf_device_map = {}
@@ -45,7 +46,8 @@ class LspciHelper(object):
for bdf, desc in self.bdf_desc_map.items():
device = get_device_name(bdf)
if device is None:
- LOG.info("cann't find device name for bdf:%s, no driver is available.", bdf)
+ LOG.info(
+ "cann't find device name for bdf:%s, no driver is available.", bdf)
try:
self._load_driver(desc)
except:
@@ -66,13 +68,17 @@ class LspciHelper(object):
def _get_ip_macs(self):
for device, bdf in self.device_bdf_map.items():
buf = check_output("ip addr show dev %s" % device, shell=True)
- macs = re.compile("[A-F0-9]{2}(?::[A-F0-9]{2}){5}", re.IGNORECASE | re.MULTILINE)
+ macs = re.compile(
+ "[A-F0-9]{2}(?::[A-F0-9]{2}){5}",
+ re.IGNORECASE | re.MULTILINE)
for mac in macs.findall(buf):
if mac.lower() in ('00:00:00:00:00:00', 'ff:ff:ff:ff:ff:ff'):
continue
else:
break
- ips = re.compile(r"inet (\d{1,3}\.\d{1,3}\.\d{1,3}.\d{1,3}/\d{1,2})", re.MULTILINE)
+ ips = re.compile(
+ r"inet (\d{1,3}\.\d{1,3}\.\d{1,3}.\d{1,3}/\d{1,2})",
+ re.MULTILINE)
ip = ips.findall(buf)
if ip:
self.bdf_ip_map[bdf] = ip[0]
@@ -93,6 +99,7 @@ class LspciHelper(object):
class DeviceManager(object):
+
def __init__(self):
super(DeviceManager, self).__init__()
mgr = netns.NetnsManager()
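
The two re.compile calls split across lines above behave as before: one matches MAC addresses, the other captures the inet CIDR from `ip addr show` output (the IP pattern leaves its third dot unescaped, which still matches, just loosely). A short illustration against canned output:

import re

MAC_RE = re.compile(r"[A-F0-9]{2}(?::[A-F0-9]{2}){5}", re.IGNORECASE | re.MULTILINE)
IP_RE = re.compile(r"inet (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2})", re.MULTILINE)

sample = """2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500
    link/ether 52:54:00:12:34:56 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.10/24 brd 192.168.1.255 scope global eth0"""

macs = [m for m in MAC_RE.findall(sample)
        if m.lower() not in ('00:00:00:00:00:00', 'ff:ff:ff:ff:ff:ff')]
print(macs)                    # ['52:54:00:12:34:56']
print(IP_RE.findall(sample))   # ['192.168.1.10/24']
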
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/image_manager.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/image_manager.py
index c3b5c6b3..4bae49d2 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/image_manager.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/image_manager.py
@@ -19,6 +19,7 @@ class _ImageManager(object):
A qemu-img wrapper to create qcow2 child image from a parent image.
"""
+
def __init__(self, parent_image_path, child_image_dir):
"""
:param parent_image_path str: the parent image path.
@@ -31,7 +32,11 @@ class _ImageManager(object):
assert os.path.isfile(self.parent_image_path)
assert os.path.isdir(self.child_image_dir)
- def create_child_image(self, child_name, full_clone=False, image_type='qcow2'):
+ def create_child_image(
+ self,
+ child_name,
+ full_clone=False,
+ image_type='qcow2'):
"""
create a child image and put it in self.child_image_dir.
@@ -39,16 +44,25 @@ class _ImageManager(object):
:return: return the path of child image.
"""
- image_path = os.path.join(self.child_image_dir, child_name) + '.' + image_type
+ image_path = os.path.join(
+ self.child_image_dir,
+ child_name) + '.' + image_type
if full_clone:
- cmd = self._convert_str % {'image_type': image_type, 'child_path': image_path, 'parent_path': self.parent_image_path}
+ cmd = self._convert_str % {
+ 'image_type': image_type,
+ 'child_path': image_path,
+ 'parent_path': self.parent_image_path}
else:
- cmd = self._create_child_str % {'child_path': image_path, 'parent_path': self.parent_image_path, 'image_type':image_type}
+ cmd = self._create_child_str % {
+ 'child_path': image_path,
+ 'parent_path': self.parent_image_path,
+ 'image_type': image_type}
check_call(cmd.split())
return image_path
class ImageManager(object):
+
def __init__(self, cfg):
"""
ImageManager creates images from configuration context.
@@ -74,13 +88,22 @@ class ImageManager(object):
@staticmethod
def _check_cfg(cfg):
- for key in ('parent_image', 'dst_location', 'full_clone', 'type', 'names'):
+ for key in (
+ 'parent_image',
+ 'dst_location',
+ 'full_clone',
+ 'type',
+ 'names'):
if key not in cfg:
raise Exception("does't find %s config" % key)
if cfg['type'] not in ('raw', 'qcow2'):
- raise Exception("type:%s not supported, only support 'raw' and 'qcow2'" % cfg['type'])
+ raise Exception(
+ "type:%s not supported, only support 'raw' and 'qcow2'" %
+ cfg['type'])
if not cfg['full_clone'] and cfg['type'] == 'raw':
- raise Exception("only support 'qcow2' for not full_clone image creation" % cfg['type'])
+ raise Exception(
+ "only support 'qcow2' for not full_clone image creation" %
+ cfg['type'])
return cfg
def create_all(self):
@@ -90,7 +113,8 @@ class ImageManager(object):
:return: True for success, False for failure.
"""
for name in self.names:
- image = self.mgr.create_child_image(name, self.full_clone, self.image_type)
+ image = self.mgr.create_child_image(
+ name, self.full_clone, self.image_type)
LOG.info("image: %s created", image)
return True
@@ -101,7 +125,8 @@ class ImageManager(object):
:return: True for success. Raise exception otherwise.
"""
for name in self.names:
- image_path = os.path.join(self.image_dir, name + '.' + self.image_type)
+ image_path = os.path.join(
+ self.image_dir, name + '.' + self.image_type)
try:
os.unlink(image_path)
LOG.info("remove:%s successfully", image_path)
@@ -114,7 +139,12 @@ if __name__ == '__main__':
import argparse
import json
parser = argparse.ArgumentParser()
- parser.add_argument('action', choices = ('create','clean'), help='action:create|clean')
+ parser.add_argument(
+ 'action',
+ choices=(
+ 'create',
+ 'clean'),
+ help='action:create|clean')
parser.add_argument('--config', help='config file to parse')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
@@ -124,5 +154,3 @@ if __name__ == '__main__':
mgr.create_all()
if args.action == 'clean':
mgr.clean_all()
-
-
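
create_child_image chooses between two qemu-img flows based on full_clone: a full copy of the parent versus a copy-on-write child backed by it. The command templates (_convert_str and _create_child_str) are outside the hunk, so the following is a hedged sketch of the usual qemu-img invocations rather than the module's literal strings:

import subprocess

def create_child_image(parent, child, full_clone=False, image_type='qcow2'):
    if full_clone:
        # stand-alone copy of the parent image
        cmd = ['qemu-img', 'convert', '-O', image_type, parent, child]
    else:
        # thin child that stores only the deltas against the parent
        cmd = ['qemu-img', 'create', '-f', image_type, '-b', parent, child]
    subprocess.check_call(cmd)
    return child
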
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/source_manager.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/source_manager.py
index 6edd14ca..5aca5368 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/source_manager.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/source_manager.py
@@ -27,6 +27,7 @@ def my_chdir(file_path):
class SourceCodeManager(object):
+
def __init__(self):
super(SourceCodeManager, self).__init__()
self.base_path = '/opt/vstf/'
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm9pfs.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm9pfs.py
index 7364f8b2..4b7b31b1 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm9pfs.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm9pfs.py
@@ -69,7 +69,9 @@ class VMConfigBy9pfs(object):
return ret == constant.VM_CMD_EXCUTE_SUCCES_FLAG_CONTENT
def _wait_command_done(self):
- done = self._wait_flag_file_to_exist(constant.VM_CMD_DONE_FLAG_FILE, constant.VM_COMMON_CMD_EXCUTE_TIME_OUT)
+ done = self._wait_flag_file_to_exist(
+ constant.VM_CMD_DONE_FLAG_FILE,
+ constant.VM_COMMON_CMD_EXCUTE_TIME_OUT)
if done:
return self._get_cmd_return_code()
else:
@@ -86,7 +88,8 @@ class VMConfigBy9pfs(object):
raise Exception("9pfs command failure: timeout.")
def wait_up(self):
- return self._wait_flag_file_to_exist(constant.VM_UP_Flag_FILE, constant.VM_UP_TIME_OUT)
+ return self._wait_flag_file_to_exist(
+ constant.VM_UP_Flag_FILE, constant.VM_UP_TIME_OUT)
def config_ip(self, mac, ip):
cmd = 'config_ip %s %s' % (mac, ip)
@@ -118,7 +121,13 @@ class VMConfigBy9pfs(object):
cmd = 'recover_nic_binding ' + mac_str
return self._set_cmd(cmd)
- def config_amqp(self, identity, server, port=5672, user="guest", passwd="guest"):
+ def config_amqp(
+ self,
+ identity,
+ server,
+ port=5672,
+ user="guest",
+ passwd="guest"):
data = {
'server': server,
'port': port,
@@ -135,7 +144,7 @@ class VMConfigBy9pfs(object):
id=%(id)s''' % data
file_name = "amqp.ini"
dedented_text = textwrap.dedent(content)
- self._write(file_name, header+dedented_text)
+ self._write(file_name, header + dedented_text)
cmd = 'config_amqp %s' % file_name
return self._set_cmd(cmd)
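
The wrapped _wait_flag_file_to_exist calls implement a handshake over the shared 9pfs mount: the host drops a command file, then polls for a done-flag file until a timeout expires. A minimal sketch of that polling helper (assumed semantics; the real paths and timeouts live in fsmonitor/constant.py):

import os
import time

def wait_flag_file(path, timeout, interval=1):
    """Return True once `path` appears, False if `timeout` seconds pass first."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if os.path.exists(path):
            return True
        time.sleep(interval)
    return False
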
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_manager.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_manager.py
index 60a3b37b..d0a2060d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_manager.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_manager.py
@@ -93,8 +93,14 @@ class VMControlOperation(object):
@staticmethod
def check_required_options(context):
- for key in ('vm_name', 'vm_memory', 'vm_cpu', 'image_path', 'image_type', 'taps'):
- if not context.has_key(key):
+ for key in (
+ 'vm_name',
+ 'vm_memory',
+ 'vm_cpu',
+ 'image_path',
+ 'image_type',
+ 'taps'):
+ if key not in context:
raise Exception("vm config error, must set %s option" % key)
def set_vm_defaults(self, context):
@@ -117,14 +123,18 @@ class VMControlOperation(object):
context.setdefault(k, v)
def _shutdown_vm(self):
- out = check_output("virsh list | sed 1,2d | awk '{print $2}'", shell=True)
+ out = check_output(
+ "virsh list | sed 1,2d | awk '{print $2}'",
+ shell=True)
vm_set = set(out.split())
for vm in vm_set:
check_call("virsh shutdown %s" % vm, shell=True)
timeout = 60
# wait for gracefully shutdown
while timeout > 0:
- out = check_output("virsh list | sed 1,2d | awk '{print $2}'", shell=True)
+ out = check_output(
+ "virsh list | sed 1,2d | awk '{print $2}'",
+ shell=True)
vm_set = set(out.split())
if len(vm_set) == 0:
break
@@ -135,7 +145,9 @@ class VMControlOperation(object):
for vm in vm_set:
check_call("virsh destroy %s" % vm, shell=True)
# undefine all
- out = check_output("virsh list --all | sed 1,2d | awk '{print $2}'", shell=True)
+ out = check_output(
+ "virsh list --all | sed 1,2d | awk '{print $2}'",
+ shell=True)
vm_set = set(out.split())
for vm in vm_set:
check_call("virsh undefine %s" % vm, shell=True)
@@ -177,7 +189,8 @@ class VMControlOperation(object):
vm9pctrl = self.vm_9p_controllers[vm_name]
ret = vm9pctrl.wait_up()
if ret not in (True,):
- raise Exception('vm running but stuck in boot process, please manully check.')
+ raise Exception(
+ 'vm running but stuck in boot process, please manually check.')
LOG.debug('waitVM %s up ok, ret:%s', vm_name, ret)
return True
@@ -193,12 +206,14 @@ class VMControlOperation(object):
# print self.vm_9p_controllers
init_cfg = vm_cfg['init_config']
if "ctrl_ip_setting" in init_cfg:
- ret = vm9pctrl.config_ip(vm_cfg['ctrl_mac'], init_cfg['ctrl_ip_setting'])
- assert ret == True
+ ret = vm9pctrl.config_ip(
+ vm_cfg['ctrl_mac'],
+ init_cfg['ctrl_ip_setting'])
+ assert ret
LOG.info('initConfigVM config ip ok')
if 'ctrl_gw' in init_cfg:
ret = vm9pctrl.config_gw(init_cfg['ctrl_gw'])
- assert ret == True
+ assert ret
LOG.info('initConfigVM ctrl_gw ok')
if "ctrl_ip_setting" in init_cfg and "amqp_server" in init_cfg:
identity = init_cfg['ctrl_ip_setting'].split('/')[0]
@@ -209,7 +224,7 @@ class VMControlOperation(object):
user = init_cfg['amqp_user']
passwd = init_cfg['amqp_passwd']
ret = vm9pctrl.config_amqp(identity, server, port, user, passwd)
- assert ret == True
+ assert ret
LOG.info('initConfigVM config_amqp ok')
if 'tap_pktloop_config' in init_cfg:
taps = vm_cfg['taps']
@@ -217,6 +232,6 @@ class VMControlOperation(object):
for tap in taps:
macs.append(tap['tap_mac'])
ret = vm9pctrl.set_pktloop_dpdk(macs)
- assert ret == True
+ assert ret
LOG.info('initConfigVM set_pktloop_dpdk ok')
return True
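
_shutdown_vm shells out to virsh: list the running domains, ask each one to shut down, wait up to a timeout, then destroy whatever is still up. A condensed sketch of that sequence using the same virsh commands as the diff (error handling trimmed):

import subprocess
import time

def list_running_vms():
    out = subprocess.check_output(
        "virsh list | sed 1,2d | awk '{print $2}'",
        shell=True, universal_newlines=True)
    return set(out.split())

def shutdown_all(timeout=60):
    for vm in list_running_vms():
        subprocess.check_call("virsh shutdown %s" % vm, shell=True)
    while timeout > 0 and list_running_vms():   # wait for graceful shutdown
        time.sleep(1)
        timeout -= 1
    for vm in list_running_vms():               # force off the stragglers
        subprocess.check_call("virsh destroy %s" % vm, shell=True)
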
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_xml_help.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_xml_help.py
index 6f9131e7..89c10963 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_xml_help.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_xml_help.py
@@ -38,7 +38,7 @@ xml_disk = '''
<source file='IMAGE_PATH'/>
<target dev='vda' bus='virtio'/>
</disk>'''
-
+
xml_ctrl_br = '''
<interface type='bridge'>
<mac address='CTRL_MAC'/>
@@ -63,7 +63,7 @@ xml_br = '''
<model type='virtio'/>
<target dev='TAP_NAME'/>
</interface>'''
-
+
xml_pci = '''
<hostdev mode='subsystem' type='pci' managed='yes'>
<driver name='kvm'/>
@@ -82,4 +82,3 @@ xml_tail = '''
</graphics>
</devices>
</domain>'''
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/builder.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/builder.py
index a66a8873..19bf12f2 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/builder.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/builder.py
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
class PluginManager(object):
+
def __init__(self):
self.instance = None
self.saved = {}
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/__init__.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/__init__.py
index fc9802be..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/__init__.py
@@ -5,4 +5,4 @@
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-############################################################################## \ No newline at end of file
+##############################################################################
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/manager.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/manager.py
index 6f895656..e20b5dd5 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/manager.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/manager.py
@@ -11,9 +11,11 @@ import stevedore
class DriverPluginManager(object):
+
def __init__(self):
self.plugins = {}
- self.mgr = stevedore.extension.ExtensionManager(namespace="drivers.plugins", invoke_on_load=True)
+ self.mgr = stevedore.extension.ExtensionManager(
+ namespace="drivers.plugins", invoke_on_load=True)
def load(self, drivers):
plugin = self.determine_driver_type(drivers)
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/model.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/model.py
index ddc07449..807143f0 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/model.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/model.py
@@ -22,16 +22,16 @@ class DriverPlugin:
@abstractmethod
def clean(self):
"""implement this clean function to clean environment before and after calling any other functions.
-
+
"""
pass
@abstractmethod
def load(self, drivers):
"""load driver modules.
-
+
:param list drivers:list of modules to be inserted. for example:[ixgbe,vhost_net]
-
+
"""
pass
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/origin_driver.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/origin_driver.py
index bf3c15c8..2004b8e8 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/origin_driver.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/origin_driver.py
@@ -24,7 +24,7 @@ class OriginDriverPlugin(model.DriverPlugin):
def clean(self):
"""clean drivers list in self.origin_drivers.
-
+
"""
for mod in self.origin_drivers:
check_and_rmmod(mod)
@@ -34,7 +34,7 @@ class OriginDriverPlugin(model.DriverPlugin):
def load(self, drivers):
"""insmod drivers
-
+
:param list drivers: list of drivers like ['ixgbe','vhost_net']
"""
# load implicit 'tun' module dependency for vhost_net
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/FSMonitor.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/FSMonitor.py
index e6559362..53cddebb 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/FSMonitor.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/FSMonitor.py
@@ -23,6 +23,7 @@ LOG = logging.getLogger('__name__')
class VMOperation(object):
+
def __init__(self):
self.RTE_SDK = '/home/dpdk-2.0.0'
self.RTE_TARGET = 'x86_64-native-linuxapp-gcc'
@@ -46,7 +47,8 @@ class VMOperation(object):
for mac in tap_macs:
bdf = self.ip_helper.mac_bdf_map[mac]
bdf_str = bdf_str + ' ' + bdf
- cmd = 'python %s/tools/dpdk_nic_bind.py --bind=virtio-pci %s' % (self.RTE_SDK, bdf_str)
+ cmd = 'python %s/tools/dpdk_nic_bind.py --bind=virtio-pci %s' % (
+ self.RTE_SDK, bdf_str)
LOG.debug("recover_nic_binding runs cmd = %s", cmd)
check_call(cmd, shell=True)
@@ -60,15 +62,22 @@ class VMOperation(object):
check_call("mount -t hugetlbfs nodev /mnt/huge", shell=True)
check_call("modprobe uio", shell=True)
check_and_rmmod('igb_uio')
- check_call("insmod %s/%s/kmod/igb_uio.ko" % (RTE_SDK, RTE_TARGET), shell=True)
+ check_call(
+ "insmod %s/%s/kmod/igb_uio.ko" %
+ (RTE_SDK, RTE_TARGET), shell=True)
bdf_str = ''
for mac in tap_macs:
bdf = self.ip_helper.mac_bdf_map[mac]
bdf_str = bdf_str + ' ' + bdf
- check_call('python %s/tools/dpdk_nic_bind.py --bind=igb_uio %s' % (RTE_SDK, bdf_str), shell=True)
- cpu_num = int(check_output('cat /proc/cpuinfo | grep processor | wc -l', shell=True))
+ check_call(
+ 'python %s/tools/dpdk_nic_bind.py --bind=igb_uio %s' %
+ (RTE_SDK, bdf_str), shell=True)
+ cpu_num = int(
+ check_output(
+ 'cat /proc/cpuinfo | grep processor | wc -l',
+ shell=True))
cpu_bit_mask = 0
i = cpu_num
while i:
@@ -76,14 +85,7 @@ class VMOperation(object):
i -= 1
cpu_bit_mask = hex(cpu_bit_mask)
cmd = "%s/%s/app/testpmd -c %s -n %d -- --disable-hw-vlan --disable-rss --nb-cores=%d --rxq=%d --txq=%d --rxd=4096 --txd=4096" % (
- RTE_SDK,
- RTE_TARGET,
- cpu_bit_mask,
- cpu_num / 2,
- cpu_num - 1,
- (cpu_num - 1) / 2,
- (cpu_num - 1) / 2
- )
+ RTE_SDK, RTE_TARGET, cpu_bit_mask, cpu_num / 2, cpu_num - 1, (cpu_num - 1) / 2, (cpu_num - 1) / 2)
LOG.info("set_pktloop_dpdk runs cmd = %s", cmd)
p = subprocess.Popen(cmd.split())
if not p.poll():
@@ -105,6 +107,7 @@ class VMOperation(object):
class FSMonitor(object):
+
def __init__(self, pidfile=None, interval=1):
if pidfile:
self.pidfile = pidfile
@@ -121,8 +124,9 @@ class FSMonitor(object):
pass
def kill_old(self):
- out = check_output("ps -ef | grep -v grep | egrep 'python.*%s' | awk '{print $2}'" % sys.argv[0],
- shell=True)
+ out = check_output(
+ "ps -ef | grep -v grep | egrep 'python.*%s' | awk '{print $2}'" %
+ sys.argv[0], shell=True)
if out:
for pid in out.split():
if int(pid) != os.getpid():
@@ -131,7 +135,8 @@ class FSMonitor(object):
def set_fail(self, failed_reason):
with open(constant.VM_CMD_RETURN_CODE_FILE, 'w') as f:
- f.writelines([constant.VM_CMD_EXCUTE_FAILED_FLAG_CONTENT, '\n', failed_reason])
+ f.writelines(
+ [constant.VM_CMD_EXCUTE_FAILED_FLAG_CONTENT, '\n', failed_reason])
with open(constant.VM_CMD_DONE_FLAG_FILE, 'w') as f:
pass
@@ -149,8 +154,10 @@ class FSMonitor(object):
pid = os.fork()
if pid > 0:
sys.exit(0)
- except OSError, e:
- sys.stderr.write('fork #1 failed:%d,(%s)\n' % (e.errno, e.strerror))
+ except OSError as e:
+ sys.stderr.write(
+ 'fork #1 failed:%d,(%s)\n' %
+ (e.errno, e.strerror))
sys.exit(1)
os.setsid()
os.umask(0)
@@ -158,10 +165,17 @@ class FSMonitor(object):
pid = os.fork()
if pid > 0:
sys.exit(0)
- except OSError, e:
- sys.stderr.write('fork #2 failed:%d,(%s)\n' % (e.errno, e.strerror))
+ except OSError as e:
+ sys.stderr.write(
+ 'fork #2 failed:%d,(%s)\n' %
+ (e.errno, e.strerror))
sys.exit(1)
- LOG.debug("pid:%d,ppid:%d,sid:%d", os.getpid(), os.getppid(), os.getsid(os.getpid()))
+ LOG.debug(
+ "pid:%d,ppid:%d,sid:%d",
+ os.getpid(),
+ os.getppid(),
+ os.getsid(
+ os.getpid()))
old = open('/dev/null', 'r')
os.dup2(old.fileno(), sys.stdin.fileno())
old = open('/dev/null', 'a+')
@@ -192,8 +206,9 @@ class FSMonitor(object):
method(*param)
self.set_success()
LOG.debug("cmd sucessfully done")
- except Exception, e:
- LOG.debug('failed to run:%s %s,reason:%s', cmd, param, str(e))
+ except Exception as e:
+ LOG.debug(
+ 'failed to run:%s %s,reason:%s', cmd, param, str(e))
self.set_fail(str(e))
break
else:
@@ -209,7 +224,8 @@ if __name__ == '__main__':
# echo "config_ip 56:6f:44:a5:3f:a2 192.168.188.200/23" > command;touch command_set
# echo "config_gw 192.168.188.1" > command;touch command_set
# echo set_pktloop_dpdk 56:6f:44:a5:3f:a2 56:6f:44:a5:3f:a3 > command;touch command_set
- # echo recover_nic_binding 56:6f:44:a5:3f:a2 56:6f:44:a5:3f:a3 > command;touch command_set
+ # echo recover_nic_binding 56:6f:44:a5:3f:a2 56:6f:44:a5:3f:a3 >
+ # command;touch command_set
import os
logging.basicConfig(level=logging.DEBUG, filename=LOG_FILE, filemode='w')
os.environ['PATH'] = os.environ["PATH"] + ":/usr/local/bin"
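
Besides the reflow, this file's `except OSError, e` handlers become `except OSError as e`, the spelling accepted by Python 2.6+ and Python 3 alike. The surrounding daemonize() is the classic double fork; a compact sketch of that idiom, assuming redirecting the standard streams to /dev/null is all the caller needs:

import os
import sys

def daemonize():
    if os.fork() > 0:      # first fork: the parent exits
        sys.exit(0)
    os.setsid()            # become session leader, detach from the tty
    os.umask(0)
    if os.fork() > 0:      # second fork: the session leader exits
        sys.exit(0)
    null_in = open('/dev/null', 'r')
    null_out = open('/dev/null', 'a+')
    os.dup2(null_in.fileno(), sys.stdin.fileno())
    os.dup2(null_out.fileno(), sys.stdout.fileno())
    os.dup2(null_out.fileno(), sys.stderr.fileno())
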
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/constant.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/constant.py
index 33b37eb4..3ae80a39 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/constant.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/constant.py
@@ -18,4 +18,4 @@ VM_CMD_EXCUTE_FAILED_FLAG_CONTENT = 'fail'
VM_CMD_NOT_FOUND = 'comamnd_not_found'
VM_UP_TIME_OUT = 120
VM_COMMON_CMD_EXCUTE_TIME_OUT = 10
-FS_MOUNT_POINT = '/mnt/9pfs' \ No newline at end of file
+FS_MOUNT_POINT = '/mnt/9pfs'
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/utils.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/utils.py
index 5bdb4159..c28b6ec6 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/utils.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/utils.py
@@ -70,6 +70,7 @@ def umount(path):
class IPCommandHelper(object):
+
def __init__(self):
self.devices = []
self.macs = []
@@ -80,7 +81,9 @@ class IPCommandHelper(object):
self.mac_bdf_map = {}
self.bdf_mac_map = {}
buf = check_output("ip link", shell=True)
- macs = re.compile("[A-F0-9]{2}(?::[A-F0-9]{2}){5}", re.IGNORECASE | re.MULTILINE)
+ macs = re.compile(
+ "[A-F0-9]{2}(?::[A-F0-9]{2}){5}",
+ re.IGNORECASE | re.MULTILINE)
for mac in macs.findall(buf):
if mac.lower() in ('00:00:00:00:00:00', 'ff:ff:ff:ff:ff:ff'):
continue
@@ -95,7 +98,10 @@ class IPCommandHelper(object):
self.mac_device_map[mac] = device
for device in self.devices:
buf = check_output("ethtool -i %s" % device, shell=True)
- bdfs = re.findall(r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$', buf, re.MULTILINE)
+ bdfs = re.findall(
+ r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$',
+ buf,
+ re.MULTILINE)
if bdfs:
self.bdf_device_map[bdfs[0]] = device
self.device_bdf_map[device] = bdfs[0]
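
IPCommandHelper maps each device to its PCI bus/device/function by running `ethtool -i <device>` and pulling the bus-info line with the multi-line regex reflowed above. A quick illustration against canned ethtool output:

import re

BUS_INFO_RE = re.compile(r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$', re.MULTILINE)

sample = "driver: virtio_net\nversion: 1.0.0\nbus-info: 0000:00:03.0\n"
print(BUS_INFO_RE.findall(sample))   # ['00:03.0']
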
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/libvirt_plugin.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/libvirt_plugin.py
index 27af8063..2fd7d69a 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/libvirt_plugin.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/libvirt_plugin.py
@@ -20,6 +20,7 @@ LOG = logging.getLogger(__name__)
class Plugin(EnvBuilderPlugin):
+
def __init__(self):
super(Plugin, self).__init__()
self.vm_mgr = VMControlOperation()
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/tester_env_plugin.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/tester_env_plugin.py
index 0682aac8..0c994d4e 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/tester_env_plugin.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/tester_env_plugin.py
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
class Plugin(EnvBuilderPlugin):
+
def __init__(self):
super(Plugin, self).__init__()
self.dr_mgr = DriverPluginManager()
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/bridge_plugin.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/bridge_plugin.py
index 21b8f82c..fb6a54ce 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/bridge_plugin.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/bridge_plugin.py
@@ -12,6 +12,7 @@ from vstf.common.utils import check_call, get_eth_by_bdf, check_output
class BridgePlugin(model.VswitchPlugin):
+
def __init__(self):
pass
@@ -19,7 +20,9 @@ class BridgePlugin(model.VswitchPlugin):
"""clean brs created before.
"""
- out = check_output(r"brctl show | grep -v '^\s' | awk '{print $1}'|sed '1,1d'", shell=True)
+ out = check_output(
+ r"brctl show | grep -v '^\s' | awk '{print $1}'|sed '1,1d'",
+ shell=True)
print out
for br in out.split():
if br != 'br0':
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/manager.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/manager.py
index 785a1db8..4890ee11 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/manager.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/manager.py
@@ -11,9 +11,11 @@ import stevedore
class VswitchPluginManager(object):
+
def __init__(self):
self.plugin = None
- self.mgr = stevedore.extension.ExtensionManager(namespace="vswitch.plugins", invoke_on_load=True)
+ self.mgr = stevedore.extension.ExtensionManager(
+ namespace="vswitch.plugins", invoke_on_load=True)
def clean(self):
if self.plugin:
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/model.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/model.py
index 5d700411..8a80e44e 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/model.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/model.py
@@ -64,4 +64,4 @@ class VswitchPlugin:
pass
def set_fastlink(self, br_cfg):
- return True \ No newline at end of file
+ return True
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/ovs_plugin.py b/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/ovs_plugin.py
index 7ea56d4a..66943c1c 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/ovs_plugin.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/ovs_plugin.py
@@ -28,7 +28,8 @@ class OvsPlugin(model.VswitchPlugin):
self.dirs = {'db': "/usr/local/etc/openvswitch"}
self.cmds = []
self.cmds.append("mkdir -p /usr/local/etc/openvswitch")
- self.cmds.append("ovsdb-tool create /usr/local/etc/openvswitch/conf.db")
+ self.cmds.append(
+ "ovsdb-tool create /usr/local/etc/openvswitch/conf.db")
self.cmds.append("ovsdb-server --remote=punix:/usr/local/var/run/openvswitch/db.sock \
--remote=db:Open_vSwitch,Open_vSwitch,manager_options \
--private-key=db:Open_vSwitch,SSL,private_key \
@@ -81,8 +82,9 @@ class OvsPlugin(model.VswitchPlugin):
name, uplinks = br_cfg['name'], br_cfg['uplinks']
check_call("ovs-vsctl add-br %s" % (name), shell=True)
- if br_cfg['vtep']: # vxlan supports
- local_ip, remote_ip = br_cfg['vtep']['local_ip'], br_cfg['vtep']['remote_ip']
+ if br_cfg['vtep']: # vxlan supports
+ local_ip, remote_ip = br_cfg['vtep'][
+ 'local_ip'], br_cfg['vtep']['remote_ip']
assert len(uplinks) == 1
uplink = uplinks[0]
device = get_eth_by_bdf(uplink['bdf'])
@@ -90,7 +92,9 @@ class OvsPlugin(model.VswitchPlugin):
vtep = 'vx1'
check_call("ifconfig %s %s up" % (device, local_ip), shell=True)
check_call("ovs-vsctl add-port %s %s" % (name, vtep), shell=True)
- check_call("ovs-vsctl set interface %s type=vxlan options:remote_ip=%s" % (vtep, remote_ip), shell=True)
+ check_call(
+ "ovs-vsctl set interface %s type=vxlan options:remote_ip=%s" %
+ (vtep, remote_ip), shell=True)
for uplink in uplinks:
device = get_eth_by_bdf(uplink['bdf'])
vlan_mode = uplink['vlan_mode']
@@ -99,9 +103,13 @@ class OvsPlugin(model.VswitchPlugin):
call("ethtool -A %s rx off tx off " % device, shell=True)
check_call("ovs-vsctl add-port %s %s" % (name, device), shell=True)
if vlan_mode == 'trunk':
- check_call("ovs-vsctl set port %s trunks=%s" % (device, vlan_id), shell=True)
+ check_call(
+ "ovs-vsctl set port %s trunks=%s" %
+ (device, vlan_id), shell=True)
elif vlan_mode == 'access':
- check_call("ovs-vsctl set port %s tag=%s" % (device, vlan_id), shell=True)
+ check_call(
+ "ovs-vsctl set port %s tag=%s" %
+ (device, vlan_id), shell=True)
else:
raise Exception("unreconized vlan_mode:%s" % vlan_mode)
return True
@@ -118,7 +126,8 @@ class OvsPlugin(model.VswitchPlugin):
}
"""
- port, vlan_mode, vlan = tap_cfg['tap_name'], tap_cfg['vlan_mode'], tap_cfg['vlan_id']
+ port, vlan_mode, vlan = tap_cfg['tap_name'], tap_cfg[
+ 'vlan_mode'], tap_cfg['vlan_id']
assert vlan_mode in ('access', 'vxlan')
if int(vlan) > '4095':
# vxlan setting
@@ -162,15 +171,21 @@ class OvsPlugin(model.VswitchPlugin):
if vlan_mode == 'vxlan':
raise Exception("don't support vxlan setting right now.")
elif vlan_mode == 'trunk':
- check_call("ovs-vsctl set port %s trunks=%s" % (port, vlan_id), shell=True)
+ check_call(
+ "ovs-vsctl set port %s trunks=%s" %
+ (port, vlan_id), shell=True)
else:
- check_call("ovs-vsctl set port %s tag=%s" % (port, vlan_id), shell=True)
+ check_call(
+ "ovs-vsctl set port %s tag=%s" %
+ (port, vlan_id), shell=True)
def __fastlink(self, br, p1, p2):
LOG.info("_fastlink(%s,%s,%s)", br, p1, p2)
p1 = p1.replace(' ', '')
p2 = p2.replace(' ', '')
- bdfs = check_output("lspci |grep Eth | awk '{print $1}'", shell=True).splitlines()
+ bdfs = check_output(
+ "lspci |grep Eth | awk '{print $1}'",
+ shell=True).splitlines()
if p1 in bdfs:
p1 = get_eth_by_bdf(p1)
if p2 in bdfs:
@@ -182,6 +197,10 @@ class OvsPlugin(model.VswitchPlugin):
port_num, interface = s.replace('(', ' ').replace(')', ' ').split()
ovs_port[interface] = port_num
pn1, pn2 = ovs_port[p1], ovs_port[p2]
- check_call("ovs-ofctl add-flow %s in_port=%s,priority=100,action=output:%s" % (br, pn1, pn2), shell=True)
- check_call("ovs-ofctl add-flow %s in_port=%s,priority=100,action=output:%s" % (br, pn2, pn1), shell=True)
+ check_call(
+ "ovs-ofctl add-flow %s in_port=%s,priority=100,action=output:%s" %
+ (br, pn1, pn2), shell=True)
+ check_call(
+ "ovs-ofctl add-flow %s in_port=%s,priority=100,action=output:%s" %
+ (br, pn2, pn1), shell=True)
return True
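
__fastlink resolves both endpoints to OVS port numbers and then installs a pair of static flows so traffic entering one port is output on the other. A sketch of that final step only (the port-number lookup via `ovs-ofctl show` is omitted):

import subprocess

def fastlink(bridge, port_a, port_b):
    flow = "ovs-ofctl add-flow %s in_port=%s,priority=100,action=output:%s"
    subprocess.check_call(flow % (bridge, port_a, port_b), shell=True)
    subprocess.check_call(flow % (bridge, port_b, port_a), shell=True)
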
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/__init__.py b/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/equalizer.py b/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/equalizer.py
index 30e1de1f..2fd20db1 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/equalizer.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/equalizer.py
@@ -24,6 +24,7 @@ def run_cmd(cmd, shell=True):
class Resource(object):
+
def __init__(self):
super(Resource, self).__init__()
self.sysfs = "/sys/devices/system/node"
@@ -35,16 +36,18 @@ class Resource(object):
for process_index in xrange(0, len(bin(process_mapping)) - 2):
if process_mapping & 1 << process_index != 0:
core = self._get_core_id(node, process_index)
- if not self.mapping[node].has_key(core):
+ if core not in self.mapping[node]:
self.mapping[node][core] = []
self.mapping[node][core].append(process_index)
def _get_process_mapping(self, numa_node):
- ret = run_cmd("cat " + self.sysfs + '/' + numa_node + '/cpumap').replace(',', '').lstrip('0')
+ ret = run_cmd("cat " + self.sysfs + '/' + numa_node +
+ '/cpumap').replace(',', '').lstrip('0')
return int(ret, 16)
def _get_core_id(self, numa_node, process_index):
- cmd = "cat " + self.sysfs + '/' + numa_node + '/cpu' + str(process_index) + '/topology/core_id'
+ cmd = "cat " + self.sysfs + '/' + numa_node + \
+ '/cpu' + str(process_index) + '/topology/core_id'
return run_cmd(cmd).strip('\n')
def _init_numa(self):
@@ -63,6 +66,7 @@ class Resource(object):
class Equalizer(Resource):
+
def __init__(self):
super(Equalizer, self).__init__()
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/get_info.py b/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/get_info.py
index 0c92f979..8a01dfc6 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/get_info.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/get_info.py
@@ -16,6 +16,7 @@ except ImportError:
class GetPhyInfo(object):
+
def __init__(self):
pass
@@ -46,7 +47,9 @@ class GetPhyInfo(object):
numa = {}
num = self._get_numa_num()
for numa_id in range(0, int(num)):
- flag, temp = commands.getstatusoutput('lscpu | grep "NUMA node%s"' % (str(numa_id)))
+ flag, temp = commands.getstatusoutput(
+ 'lscpu | grep "NUMA node%s"' %
+ (str(numa_id)))
try:
temp = temp.split(':')[1].split()[0]
except:
@@ -58,7 +61,9 @@ class GetPhyInfo(object):
def get_nic_numa(self, nic):
result = {}
try:
- flag, id = commands.getstatusoutput('cat /sys/class/net/%s/device/numa_node' % (nic))
+ flag, id = commands.getstatusoutput(
+ 'cat /sys/class/net/%s/device/numa_node' %
+ (nic))
except:
print('get nic numa id failed.')
return id
@@ -102,7 +107,9 @@ class GetPhyInfo(object):
# get vhost info
proc_name = 'vhost-' + _main_pid
- flag, temp = commands.getstatusoutput('ps -ef | grep %s | grep -v grep' % (proc_name))
+ flag, temp = commands.getstatusoutput(
+ 'ps -ef | grep %s | grep -v grep' %
+ (proc_name))
for line in temp.split('\n'):
try:
vhost = line.split()[1]
@@ -134,7 +141,8 @@ class GetPhyInfo(object):
def _get_proc_by_irq(self, irq):
try:
- flag, info = commands.getstatusoutput('ps -ef | grep irq/%s | grep -v grep ' % (irq))
+ flag, info = commands.getstatusoutput(
+ 'ps -ef | grep irq/%s | grep -v grep ' % (irq))
proc_id = info.split('\n')[0].split()[1]
except:
print("[ERROR]grep process id failed.")
@@ -142,7 +150,8 @@ class GetPhyInfo(object):
def get_nic_interrupt_proc(self, nic):
_phy_nic_thread = []
- flag, info = commands.getstatusoutput('cat /proc/interrupts | grep %s' % (nic))
+ flag, info = commands.getstatusoutput(
+ 'cat /proc/interrupts | grep %s' % (nic))
for line in info.split('\n'):
try:
irq_num = line.split(':')[0].split()[0]
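
get_nic_numa shells out with `cat /sys/class/net/<nic>/device/numa_node`; the same value can be read from sysfs directly. A one-function sketch of that lookup (an illustration, not a drop-in replacement for the commands-based call):

def nic_numa_node(nic):
    """Return the NUMA node the NIC's PCI device is attached to (-1 if unknown)."""
    with open('/sys/class/net/%s/device/numa_node' % nic) as f:
        return int(f.read().strip())
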
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/optimize.py b/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/optimize.py
index 5a09900d..4579c506 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/optimize.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/equalizer/optimize.py
@@ -15,17 +15,24 @@ import re
# pdb.set_trace()
class Optimize(object):
+
def __init__(self):
pass
def bind_cpu(self, cpu_range, thread):
- flag, num = commands.getstatusoutput('taskset -pc %s %s' % (cpu_range, thread))
+ flag, num = commands.getstatusoutput(
+ 'taskset -pc %s %s' %
+ (cpu_range, thread))
return flag
def catch_thread_info(self):
- thread_info = {'fwd_vhost': None, 'src_recv_irq': None, 'dst_send_irq': None}
+ thread_info = {
+ 'fwd_vhost': None,
+ 'src_recv_irq': None,
+ 'dst_send_irq': None}
# top -H get the usage info
- flag, threads_usages = commands.getstatusoutput('top -bH -n1 -c -w 2000')
+ flag, threads_usages = commands.getstatusoutput(
+ 'top -bH -n1 -c -w 2000')
line_array = threads_usages.split('\n')
# get highest vhost line
for line in line_array:
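
bind_cpu pins a thread to a CPU range by shelling out to `taskset -pc`, while catch_thread_info scrapes `top -bH` for the busiest vhost and irq threads. A minimal sketch of the pinning half:

import subprocess

def bind_cpu(cpu_range, pid):
    """Pin `pid` to `cpu_range`, e.g. bind_cpu('2-3', 12345)."""
    return subprocess.call('taskset -pc %s %s' % (cpu_range, pid), shell=True)
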
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/__init__.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/affctl.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/affctl.py
index 5b203632..316cbab8 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/affctl.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/affctl.py
@@ -18,4 +18,3 @@ def affctl_load(policy):
def affctl_list():
cmd = "affctl list"
return check_output(cmd, shell=True)
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/iperf.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/iperf.py
index 3105be4b..8eca165c 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/iperf.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/iperf.py
@@ -21,6 +21,7 @@ LOG = logging.getLogger(__name__)
class Iperf(object):
+
def __init__(self):
self._send_processes = []
self._receive_processes = []
@@ -40,7 +41,10 @@ class Iperf(object):
cmd = self.format_send_start(**kwargs)
LOG.debug("cmd:%s", cmd)
- process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ process = subprocess.Popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
time.sleep(1)
ret = process.poll()
if ret is None:
@@ -90,7 +94,10 @@ class Iperf(object):
cmd = self.format_receive_start(**kwargs)
LOG.debug("cmd:%s", cmd)
- process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ process = subprocess.Popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
time.sleep(1)
ret = process.poll()
if ret is None:
@@ -151,5 +158,8 @@ def unit_test():
if __name__ == "__main__":
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf-iperf.log", clevel=logging.DEBUG)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf-iperf.log",
+ clevel=logging.DEBUG)
unit_test()
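
All the perf wrappers in this directory share one start-up idiom, visible in the reflowed Popen calls: launch the tool with pipes attached, sleep briefly, and treat poll() returning None as a successful start. A stand-alone sketch of that idiom:

import subprocess
import time

def start_background(cmd):
    proc = subprocess.Popen(cmd.split(),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    time.sleep(1)
    if proc.poll() is None:        # still running, so the start succeeded
        return proc
    raise RuntimeError("command failed: %s" % proc.stderr.read())
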
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/netmap.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/netmap.py
index 88a25444..bd9cc97f 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/netmap.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/netmap.py
@@ -18,6 +18,7 @@ LOG = logging.getLogger(__name__)
class Netmap(object):
+
def __init__(self):
self._send_processes = []
self._receive_processes = []
@@ -33,7 +34,10 @@ class Netmap(object):
cmd = self.format_send_start(**kwargs)
LOG.info("cmd:%s", cmd)
- process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ process = my_popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
self._send_processes.append(process)
time.sleep(0.5)
@@ -89,7 +93,10 @@ class Netmap(object):
cmd = self.format_receive_start(**kwargs)
LOG.info("cmd:%s", cmd)
- process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ process = my_popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
self._receive_processes.append(process)
time.sleep(0.5)
@@ -164,5 +171,8 @@ def unit_test():
if __name__ == "__main__":
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-netmap.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-netmap.log",
+ clevel=logging.INFO)
unit_test()
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/netns.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/netns.py
index c3b73860..9aaaf58f 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/netns.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/netns.py
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
class Netns(object):
+
def __init__(self):
super(Netns, self).__init__()
self.netns_add_str = "ip netns add %s"
@@ -74,13 +75,14 @@ class Netns(object):
class NetnsManager(object):
+
def __init__(self):
super(NetnsManager, self).__init__()
self._netns = Netns()
def config_dev(self, netdev):
- ns, device, ip = netdev["namespace"], netdev["iface"], netdev['ip_setting'] if "ip_setting" in netdev else \
- netdev['ip']
+ ns, device, ip = netdev["namespace"], netdev["iface"], netdev[
+ 'ip_setting'] if "ip_setting" in netdev else netdev['ip']
self._netns.create_namespace(ns)
self._netns.add_device(ns, device)
self._netns.config_ip(ns, device, ip)
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/netperf.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/netperf.py
index 99f1c904..dac7d649 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/netperf.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/netperf.py
@@ -20,6 +20,7 @@ LOG = logging.getLogger(__name__)
class Netperf(object):
+
def __init__(self):
self._send_processes = []
self._islat = False
@@ -48,7 +49,10 @@ class Netperf(object):
LOG.info("cmd:%s", cmd)
for _ in range(threads):
- process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ process = my_popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
self._send_processes.append(process)
time.sleep(0.5)
for process in self._send_processes:
@@ -119,7 +123,10 @@ class Netperf(object):
cmd = self.format_receive_start(**kwargs)
LOG.info("cmd:%s", cmd)
- process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ process = my_popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
time.sleep(0.5)
ret = process.poll()
if ret:
@@ -177,5 +184,8 @@ def unit_test():
if __name__ == "__main__":
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-netperf.log", clevel=logging.DEBUG)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-netperf.log",
+ clevel=logging.DEBUG)
unit_test()
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/pktgen.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/pktgen.py
index 671e1aa7..9aff0a0c 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/pktgen.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/pktgen.py
@@ -18,6 +18,7 @@ LOG = logging.getLogger(__name__)
class Pktgen(object):
+
def __init__(self):
utils.modprobe_pktgen()
self._send_processes = []
@@ -33,7 +34,11 @@ class Pktgen(object):
def _start(self):
cmd = 'echo start > /proc/net/pktgen/pgctrl'
- process = my_popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ process = my_popen(
+ cmd,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
LOG.info('running pid:%s', process.pid)
time.sleep(0.5)
ret = process.poll()
@@ -42,7 +47,8 @@ class Pktgen(object):
self._send_processes.append(process)
error_str = "start pktgen send success"
else:
- error_str = "start pktgen send failed, stdout:%s,stderr:%s" % (process.stdout.read(), process.stderr.read())
+ error_str = "start pktgen send failed, stdout:%s,stderr:%s" % (
+ process.stdout.read(), process.stderr.read())
LOG.info(error_str)
return ret, error_str
@@ -149,5 +155,8 @@ def unit_test():
if __name__ == "__main__":
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-pktgen.log", clevel=logging.DEBUG)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-pktgen.log",
+ clevel=logging.DEBUG)
unit_test()
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/qperf.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/qperf.py
index afdf44d7..25272d89 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/qperf.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/qperf.py
@@ -18,6 +18,7 @@ LOG = logging.getLogger(__name__)
class Qperf(object):
+
def __init__(self):
self._send_processes = []
self._receive_processes = []
@@ -30,7 +31,10 @@ class Qperf(object):
def send_start(self, **kwargs):
cmd = self.format_send_start(**kwargs)
LOG.info("cmd:%s", cmd)
- process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ process = my_popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
time.sleep(0.5)
ret = process.poll()
if ret is None:
@@ -76,7 +80,10 @@ class Qperf(object):
cmd = self.format_receive_start(**kwargs)
LOG.info("cmd:%s", cmd)
- process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ process = my_popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
time.sleep(0.5)
ret = process.poll()
if ret is None:
@@ -163,5 +170,8 @@ def unit_test():
if __name__ == "__main__":
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-qperf.log", clevel=logging.DEBUG)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-qperf.log",
+ clevel=logging.DEBUG)
unit_test()
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/sar.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/sar.py
index 0231d5c1..72d0082d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/sar.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/sar.py
@@ -20,13 +20,17 @@ LOG = logging.getLogger(__name__)
class Sar(object):
+
def __init__(self):
self.sar_cmd_str = "sar -u %(interval)s"
self.child_process = {}
def start(self, interval=2):
cmd = self.sar_cmd_str % {'interval': interval}
- child = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ child = my_popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
time.sleep(1)
if child.poll() is not None:
print child.poll()
@@ -55,7 +59,9 @@ class Sar(object):
data = {}
for h, d in zip(head, average):
data[h.strip('%')] = float(d)
- cpu_num = check_output('cat /proc/cpuinfo | grep processor | wc -l', shell=True).strip()
+ cpu_num = check_output(
+ 'cat /proc/cpuinfo | grep processor | wc -l',
+ shell=True).strip()
data.update({'cpu_num': int(cpu_num)})
return data
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/utils.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/utils.py
index 4f7ddb6a..f9ca46cd 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/utils.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/utils.py
@@ -26,7 +26,9 @@ def get_pid_by_name(process_name):
def get_cpu_num():
- cpu_num = check_output('cat /proc/cpuinfo | grep processor | wc -l', shell=True).strip()
+ cpu_num = check_output(
+ 'cat /proc/cpuinfo | grep processor | wc -l',
+ shell=True).strip()
cpu_num = int(cpu_num)
return cpu_num
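
get_cpu_num counts processors by piping /proc/cpuinfo through grep and wc. For reference, the standard library returns the same count without a shell; a one-liner sketch (not a change to the module, just the equivalent call):

import multiprocessing

def get_cpu_num():
    return multiprocessing.cpu_count()
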
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/vnstat.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/vnstat.py
index b12ac1af..49e4f0c1 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/vnstat.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/vnstat.py
@@ -19,6 +19,7 @@ LOG = logging.getLogger(__name__)
class VnStat(object):
+
def __init__(self):
self.netns_exec_str = "ip netns exec %s "
self.vnstat_cmd_str = "vnstat -l -i %s"
@@ -63,7 +64,9 @@ class VnStat(object):
m = {}
digits = re.compile(r"\d+\.?\d*")
- units = re.compile("(?:gib|mib|kib|kbit/s|gbits/s|mbit/s|p/s)", re.IGNORECASE | re.MULTILINE)
+ units = re.compile(
+ "(?:gib|mib|kib|kbit/s|gbits/s|mbit/s|p/s)",
+ re.IGNORECASE | re.MULTILINE)
units_arr = units.findall(buf)
LOG.debug(units_arr)
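
The vnstat parser pairs a digits regex with the units regex reflowed above and zips the two match lists to recover volume and rate figures. A tiny illustration against a canned line (the unit alternation is copied from the diff):

import re

digits = re.compile(r"\d+\.?\d*")
units = re.compile("(?:gib|mib|kib|kbit/s|gbits/s|mbit/s|p/s)",
                   re.IGNORECASE | re.MULTILINE)

sample = "rx 1.25 MiB 3000 p/s  tx 0.98 MiB 2800 p/s"
print(list(zip(digits.findall(sample), units.findall(sample))))
# [('1.25', 'MiB'), ('3000', 'p/s'), ('0.98', 'MiB'), ('2800', 'p/s')]
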
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/perf/vstfperf.py b/testsuites/vstf/vstf_scripts/vstf/agent/perf/vstfperf.py
index 8be3c4e5..939b12ef 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/perf/vstfperf.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/perf/vstfperf.py
@@ -40,6 +40,7 @@ LOG = logging.getLogger(__name__)
class Vstfperf(object):
+
def __init__(self):
for tool in cst.TOOLS:
obj_name = 'vstf_' + tool
@@ -75,7 +76,10 @@ class Vstfperf(object):
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-vstfperf.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-vstfperf.log",
+ clevel=logging.INFO)
perf = Vstfperf()
start = {
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/softagent.py b/testsuites/vstf/vstf_scripts/vstf/agent/softagent.py
index 6271a097..9ba1e126 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/softagent.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/softagent.py
@@ -25,6 +25,7 @@ LOG = logging.getLogger(__name__)
class ENV(object):
+
def __init__(self):
super(ENV, self).__init__()
self.builder = builder.PluginManager()
@@ -45,6 +46,7 @@ class ENV(object):
class Drivers(object):
+
def __init__(self):
super(Drivers, self).__init__()
self.dr_mgr = DriverPluginManager()
@@ -69,6 +71,7 @@ class Drivers(object):
class Cpu(object):
+
def affctl_load(self, policy):
return affctl.affctl_load(policy)
@@ -77,12 +80,13 @@ class Cpu(object):
class Perf(object):
+
def __init__(self):
super(Perf, self).__init__()
self._vnstat = vnstat.VnStat()
self._vstfperf = vstfperf.Vstfperf()
self._sar = sar.Sar()
-
+
def run_vnstat(self, device, namespace=None):
return self._vnstat.run_vnstat(device, namespace)
@@ -92,7 +96,7 @@ class Perf(object):
def perf_run(self, **kwargs):
return self._vstfperf.run(**kwargs)
- def run_cpuwatch(self, interval = 2):
+ def run_cpuwatch(self, interval=2):
return self._sar.start(interval)
def kill_cpuwatch(self, pid):
@@ -106,6 +110,7 @@ class Perf(object):
class EqualizerOps(GetPhyInfo, Optimize):
+
def __init__(self):
super(EqualizerOps, self).__init__()
@@ -115,19 +120,20 @@ class BaseAgent(coll.Collect,
Cpu,
Drivers,
DeviceManager,
- commandline.CommandLine,
+ commandline.CommandLine,
netns.NetnsManager,
SourceCodeManager
):
+
def __init__(self):
super(BaseAgent, self).__init__()
class softAgent(BaseAgent, Perf, EqualizerOps):
+
def __init__(self):
super(softAgent, self).__init__()
if __name__ == '__main__':
softAgent()
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/spirent/spirent.py b/testsuites/vstf/vstf_scripts/vstf/agent/spirent/spirent.py
index 904de736..ff2af1f8 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/spirent/spirent.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/spirent/spirent.py
@@ -14,23 +14,26 @@ import Tkinter
def build_cmd(*args):
cmd = ''
for arg in args:
- cmd = cmd+str(arg)+' '
+ cmd = cmd + str(arg) + ' '
#import pdb
- #pdb.set_trace()
+ # pdb.set_trace()
return cmd
class stcPython():
+
def __init__(self):
self.tclsh = Tkinter.Tcl()
self.stcpkg = '/home/Spirent_TestCenter_4.46/Spirent_TestCenter_Application_Linux'
- self.tclsh.eval("set auto_path [ linsert $auto_path 0 %s ]" %(self.stcpkg))
+ self.tclsh.eval(
+ "set auto_path [ linsert $auto_path 0 %s ]" %
+ (self.stcpkg))
self.tclsh.eval("package require SpirentTestCenter")
def build_cmd(self, *args):
cmd = ''
for arg in args:
- cmd = cmd+str(arg)+' '
+ cmd = cmd + str(arg) + ' '
return cmd
# [ stc base interface ]
@@ -38,206 +41,294 @@ class stcPython():
cmd = build_cmd('stc::init', *args)
return self.tclsh.eval(cmd)
# stc connect
- def stc_connect(self,*args):
+
+ def stc_connect(self, *args):
cmd = build_cmd('stc::connect', *args)
return self.tclsh.eval(cmd)
# stc disconnect
- def stc_disconnect(self,*args):
+
+ def stc_disconnect(self, *args):
cmd = build_cmd('stc::disconnect', *args)
return self.tclsh.eval(cmd)
# stc create
- def stc_create(self,*args):
+
+ def stc_create(self, *args):
cmd = build_cmd('stc::create', *args)
return self.tclsh.eval(cmd)
# stc delete
- def stc_delete(self,*args):
+
+ def stc_delete(self, *args):
cmd = build_cmd('stc::delete', *args)
return self.tclsh.eval(cmd)
# stc config
- def stc_config(self,*args):
+
+ def stc_config(self, *args):
cmd = build_cmd('stc::config', *args)
return self.tclsh.eval(cmd)
# stc get
- def stc_get(self,*args):
+
+ def stc_get(self, *args):
cmd = build_cmd('stc::get', *args)
return self.tclsh.eval(cmd)
# stc apply
- def stc_apply(self,*args):
+
+ def stc_apply(self, *args):
cmd = build_cmd('stc::apply', *args)
return self.tclsh.eval(cmd)
# stc perform
- def stc_perform(self,*args):
+
+ def stc_perform(self, *args):
cmd = build_cmd('stc::perform', *args)
return self.tclsh.eval(cmd)
# stc reserve
- def stc_reserve(self,*args):
+
+ def stc_reserve(self, *args):
cmd = build_cmd('stc::reserve', *args)
return self.tclsh.eval(cmd)
# stc release
- def stc_release(self,*args):
+
+ def stc_release(self, *args):
cmd = build_cmd('stc::release', *args)
return self.tclsh.eval(cmd)
# stc subscribe
- def stc_subscribe(self,*args):
- cmd = build_cmd('stc::subscribe',*args)
+
+ def stc_subscribe(self, *args):
+ cmd = build_cmd('stc::subscribe', *args)
return self.tclsh.eval(cmd)
# stc unsubscribe
- def stc_unsubscribe(self,*args):
+
+ def stc_unsubscribe(self, *args):
cmd = build_cmd('stc::unsubscribe', *args)
return self.tclsh.eval(cmd)
# stc wait until sequencer complete
- def stc_waituntilcomplete(self,*args):
+
+ def stc_waituntilcomplete(self, *args):
cmd = build_cmd('stc::waituntilcomplete', *args)
return self.tclsh.eval(cmd)
# stc help
+
def stc_help(self, *args):
- cmd = build_cmd('stc::help',*args)
+ cmd = build_cmd('stc::help', *args)
return self.tclsh.eval(cmd)
# [ stc expand interface ]
# get one dict-key's value
# return value
- def stc_get_value(self,stc_dict,stc_key):
- cmd = stc_dict+' -'+stc_key
+ def stc_get_value(self, stc_dict, stc_key):
+ cmd = stc_dict + ' -' + stc_key
return self.stc_get(cmd)
# create project
# return: project_name
+
def stc_create_project(self):
return self.stc_create('project')
# create port under project
# return: port name
- def stc_create_port(self,project_name):
- cmd = 'port -under '+project_name
+
+ def stc_create_port(self, project_name):
+ cmd = 'port -under ' + project_name
return self.stc_create(cmd)
# config port location
# return: None
- def stc_config_port_location(self,port_name,chassisAddress,slot,port):
+
+ def stc_config_port_location(self, port_name, chassisAddress, slot, port):
#import pdb
- #pdb.set_trace()
- cmd = port_name+' -location //'+chassisAddress+'/'+slot+'/'+port+' -UseDefaultHost False'
+ # pdb.set_trace()
+ cmd = port_name + ' -location //' + chassisAddress + \
+ '/' + slot + '/' + port + ' -UseDefaultHost False'
return self.stc_config(cmd)
# create streamblock under port
# return: streamblock name
- def stc_create_streamblock(self,port_name,vlan_tag,ExpectedRxPort,srcMac,dstMac,sourceAddr,destAddr):
+
+ def stc_create_streamblock(
+ self,
+ port_name,
+ vlan_tag,
+ ExpectedRxPort,
+ srcMac,
+ dstMac,
+ sourceAddr,
+ destAddr):
#import pdb
- #pdb.set_trace()
- if vlan_tag == None or vlan_tag == 'None':
+ # pdb.set_trace()
+ if vlan_tag is None or vlan_tag == 'None':
frameStruc = '"EthernetII IPv4 Udp"'
- if ExpectedRxPort == '' :
- return self.stc_create( 'streamBlock -under ',port_name,
- '-frameConfig ',frameStruc,
- '-frame "EthernetII.srcMac',srcMac,'EthernetII.dstMac',dstMac,
- 'IPv4.1.sourceAddr',sourceAddr,'IPv4.1.destAddr',destAddr,'"')
- else :
- return self.stc_create( 'streamBlock -under ',port_name,
- '-ExpectedRxPort',ExpectedRxPort,
- '-frameConfig ',frameStruc,
- '-frame "EthernetII.srcMac',srcMac,'EthernetII.dstMac',dstMac,
- 'IPv4.1.sourceAddr',sourceAddr,'IPv4.1.destAddr',destAddr,'"')
- else :
+ if ExpectedRxPort == '':
+ return self.stc_create(
+ 'streamBlock -under ',
+ port_name,
+ '-frameConfig ',
+ frameStruc,
+ '-frame "EthernetII.srcMac',
+ srcMac,
+ 'EthernetII.dstMac',
+ dstMac,
+ 'IPv4.1.sourceAddr',
+ sourceAddr,
+ 'IPv4.1.destAddr',
+ destAddr,
+ '"')
+ else:
+ return self.stc_create(
+ 'streamBlock -under ',
+ port_name,
+ '-ExpectedRxPort',
+ ExpectedRxPort,
+ '-frameConfig ',
+ frameStruc,
+ '-frame "EthernetII.srcMac',
+ srcMac,
+ 'EthernetII.dstMac',
+ dstMac,
+ 'IPv4.1.sourceAddr',
+ sourceAddr,
+ 'IPv4.1.destAddr',
+ destAddr,
+ '"')
+ else:
frameStruc = '"EthernetII Vlan IPv4 Udp"'
- if ExpectedRxPort == '' :
- return self.stc_create( 'streamBlock -under ',port_name,
- '-frameConfig '+frameStruc,
- '-frame "EthernetII.srcMac',srcMac,'EthernetII.dstMac',dstMac,
- 'Vlan.1.id',vlan_tag,
- 'IPv4.1.sourceAddr',sourceAddr,'IPv4.1.destAddr',destAddr,'"')
- else :
- return self.stc_create( 'streamBlock -under ',port_name,
- '-ExpectedRxPort',ExpectedRxPort,
- '-frameConfig '+frameStruc,
- '-frame "EthernetII.srcMac',srcMac,'EthernetII.dstMac',dstMac,
- 'Vlan.1.id',vlan_tag,
- 'IPv4.1.sourceAddr',sourceAddr,'IPv4.1.destAddr',destAddr,'"')
+ if ExpectedRxPort == '':
+ return self.stc_create(
+ 'streamBlock -under ',
+ port_name,
+ '-frameConfig ' +
+ frameStruc,
+ '-frame "EthernetII.srcMac',
+ srcMac,
+ 'EthernetII.dstMac',
+ dstMac,
+ 'Vlan.1.id',
+ vlan_tag,
+ 'IPv4.1.sourceAddr',
+ sourceAddr,
+ 'IPv4.1.destAddr',
+ destAddr,
+ '"')
+ else:
+ return self.stc_create(
+ 'streamBlock -under ',
+ port_name,
+ '-ExpectedRxPort',
+ ExpectedRxPort,
+ '-frameConfig ' +
+ frameStruc,
+ '-frame "EthernetII.srcMac',
+ srcMac,
+ 'EthernetII.dstMac',
+ dstMac,
+ 'Vlan.1.id',
+ vlan_tag,
+ 'IPv4.1.sourceAddr',
+ sourceAddr,
+ 'IPv4.1.destAddr',
+ destAddr,
+ '"')
# config streamblock with part arguments
# argument list use args dictionary
- def stc_config_streamblock(self,streamblock_name,args_dict):
+
+ def stc_config_streamblock(self, streamblock_name, args_dict):
cmd = ''
- for key in args_dict.keys() :
- temp_cmd = '-'+key+' '+str(args_dict[key])
+ for key in args_dict.keys():
+ temp_cmd = '-' + key + ' ' + str(args_dict[key])
cmd = cmd + temp_cmd
- return self.stc_config(streamblock_name,cmd)
+ return self.stc_config(streamblock_name, cmd)
# get generator name from port name
# return: generator name
- def stc_get_generator(self,port_name):
- cmd = port_name+' -children-generator'
+
+ def stc_get_generator(self, port_name):
+ cmd = port_name + ' -children-generator'
return self.stc_get(cmd)
# config generator with part arguments
# argument list use args dictionary
# return none
- def stc_config_generator(self,generator_name,args_dict):
+
+ def stc_config_generator(self, generator_name, args_dict):
cmd = ''
- for key in args_dict.keys() :
- temp_cmd = '-'+key+' '+str(args_dict[key])
+ for key in args_dict.keys():
+ temp_cmd = '-' + key + ' ' + str(args_dict[key])
cmd = cmd + temp_cmd
- return self.stc_config(generator_name,cmd)
+ return self.stc_config(generator_name, cmd)
# attach port
# return: port's parent project info
- def stc_attach_ports(self,portList):
+
+ def stc_attach_ports(self, portList):
cmd = 'AttachPorts -portList {'
- for port in portList :
- cmd = cmd+' '+port
- cmd = cmd+'} -autoConnect TRUE'
+ for port in portList:
+ cmd = cmd + ' ' + port
+ cmd = cmd + '} -autoConnect TRUE'
return self.stc_perform(cmd)
# config src mac and dst mac
# return: none
- def stc_config_ethII(self,ethII,src_mac,dst_mac):
- cmd = ethII+' -srcMac '+src_mac+' -dstMac '+dst_mac
+
+ def stc_config_ethII(self, ethII, src_mac, dst_mac):
+ cmd = ethII + ' -srcMac ' + src_mac + ' -dstMac ' + dst_mac
return self.stc_config(cmd)
# config src ip and dst ip
# return: none
- def stc_config_ethIII(self,ethIII,src_ip,dst_ip):
- cmd = ethIII+' -sourceAddr '+src_ip+' -destAddr '+dst_ip
+
+ def stc_config_ethIII(self, ethIII, src_ip, dst_ip):
+ cmd = ethIII + ' -sourceAddr ' + src_ip + ' -destAddr ' + dst_ip
return self.stc_config(cmd)
# start streamblock
# return: none
- def stc_streamblock_start(self,streamblock_list):
+
+ def stc_streamblock_start(self, streamblock_list):
cmd = 'StreamBlockStart -StreamBlockList {'
- for streamblock in streamblock_list :
- cmd = cmd+' '+streamblock
- cmd = cmd+' } -ExecuteSynchronous TRUE'
+ for streamblock in streamblock_list:
+ cmd = cmd + ' ' + streamblock
+ cmd = cmd + ' } -ExecuteSynchronous TRUE'
return self.stc_perform(cmd)
# stop streamblock
- def stc_streamblock_stop(self,streamblock_list):
+
+ def stc_streamblock_stop(self, streamblock_list):
cmd = 'StreamBlockStop -StreamBlockList {'
- for streamblock in streamblock_list :
- cmd = cmd+' '+streamblock
- cmd = cmd+' } -ExecuteSynchronous TRUE'
+ for streamblock in streamblock_list:
+ cmd = cmd + ' ' + streamblock
+ cmd = cmd + ' } -ExecuteSynchronous TRUE'
return self.stc_perform(cmd)
# start generator
# return: none
- def stc_generator_start(self,generator_List):
+
+ def stc_generator_start(self, generator_List):
cmd = 'GeneratorStart -generatorList {'
- for generator in generator_List :
- cmd = cmd+' '+generator
- cmd = cmd+' }'
+ for generator in generator_List:
+ cmd = cmd + ' ' + generator
+ cmd = cmd + ' }'
return self.stc_perform(cmd)
# stop generator
# return: none
- def stc_generator_stop(self,generator_List):
+
+ def stc_generator_stop(self, generator_List):
cmd = 'GeneratorStop -generatorList {'
- for generator in generator_List :
- cmd = cmd+' '+generator
- cmd = cmd+' }'
+ for generator in generator_List:
+ cmd = cmd + ' ' + generator
+ cmd = cmd + ' }'
return self.stc_perform(cmd)
# create rfc2544 throughput test
+
def stc_setup_rfc2544_throughput(self):
pass
# create rfc2544 frameloss test
+
def stc_setup_rfc2544_frameloss(self):
pass
# create rfc2544 latency test
+
def stc_setup_rfc2544_latency(self):
pass
# start Sequence start
+
def stc_sequence_start(self):
return self.stc_perform('SequencerStart')
# output rfc2544 throughput result
+
def stc_get_rfc2544_throughput_result(self):
pass
# output rfc2544 frameloss result
+
def stc_get_rfc2544_frameloss_result(self):
pass
# output rfc2544 latency result
+
def stc_get_rfc2544_latency_result(self):
pass
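
All of the stc_* wrappers above reduce to composing a Tcl command string with build_cmd() and handing it to the embedded Tcl shell. A minimal sketch of that pattern, assuming build_cmd() simply space-joins its arguments (its actual definition sits outside this hunk):

    # Hedged sketch only; build_cmd's real implementation is not shown here,
    # so the space-join below is an assumption.
    def build_cmd(*args):
        # Compose the Tcl command, e.g.
        # build_cmd('stc::create', 'port -under project1')
        # -> 'stc::create port -under project1'
        return ' '.join(str(arg) for arg in args)

    class FakeTclsh(object):
        # Illustrative stand-in for the Tcl interpreter wrapped by stcPython.
        def eval(self, cmd):
            return cmd

    tclsh = FakeTclsh()
    print(tclsh.eval(build_cmd('stc::create', 'port -under project1')))
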
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/spirent/tools.py b/testsuites/vstf/vstf_scripts/vstf/agent/spirent/tools.py
index 088a7b13..0936d39d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/spirent/tools.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/spirent/tools.py
@@ -11,324 +11,376 @@
import time
from spirent import stcPython
+
class Spirent_Tools(object):
baseAPI = stcPython()
+
def __init__(self):
"""This class provide API of Spirent
-
+
"""
super(Spirent_Tools, self).__init__()
-
- def send_packet(self,flow):
+
+ def send_packet(self, flow):
try:
#import pdb
- #pdb.set_trace()
+ # pdb.set_trace()
flow = eval(flow)
- #stc init action
+ # stc init action
self.baseAPI.stc_perform(' ResetConfig -config system1')
self.baseAPI.stc_init()
- #create project
+ # create project
project = self.baseAPI.stc_create_project()
- #create port
+ # create port
port_handle = self.baseAPI.stc_create_port(project)
- #config port
+ # config port
slot = flow['send_port'].split('/')[0]
port = flow['send_port'].split('/')[1]
- self.baseAPI.stc_config_port_location(port_handle,flow['tester_ip'],slot,port)
- #create streamblock
+ self.baseAPI.stc_config_port_location(
+ port_handle, flow['tester_ip'], slot, port)
+ # create streamblock
streamblock_handle = self.baseAPI.stc_create_streamblock(
- port_name = port_handle,
- ExpectedRxPort = '',
- vlan_tag = flow['vlan'],
- srcMac = flow['src_mac'],
- dstMac = flow['dst_mac'],
- sourceAddr = flow['src_ip'],
- destAddr =flow['dst_ip']
- )
+ port_name=port_handle,
+ ExpectedRxPort='',
+ vlan_tag=flow['vlan'],
+ srcMac=flow['src_mac'],
+ dstMac=flow['dst_mac'],
+ sourceAddr=flow['src_ip'],
+ destAddr=flow['dst_ip']
+ )
# attach port
port_list = [port_handle]
self.baseAPI.stc_attach_ports(port_list)
- #start streamblock
+ # start streamblock
streamblock_list = [streamblock_handle]
flag = self.baseAPI.stc_streamblock_start(streamblock_list)
return str(streamblock_list).strip('[]')
- except :
+ except:
print("[ERROR]create stream block and send packet failed.")
return False
- def mac_learning(self,flowA,flowB):
+ def mac_learning(self, flowA, flowB):
try:
#import pdb
- #pdb.set_trace()
+ # pdb.set_trace()
flowA = eval(flowA)
flowB = eval(flowB)
port_list = []
streamblock_list = []
- #stc init action
+ # stc init action
self.baseAPI.stc_perform(' ResetConfig -config system1')
self.baseAPI.stc_init()
- #create project
+ # create project
project = self.baseAPI.stc_create_project()
- #create port and config port
- for flow in [ flowA,flowB ]:
+ # create port and config port
+ for flow in [flowA, flowB]:
flow['port_handle'] = self.baseAPI.stc_create_port(project)
tmp_test_ip = flow['tester_ip']
tmp_slot = flow['send_port'].split('/')[0]
tmp_port = flow['send_port'].split('/')[1]
- self.baseAPI.stc_config_port_location(flow['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
- #create streamblock
- flow['streamblock'] = self.baseAPI.stc_create_streamblock(port_name = flow['port_handle'],
- ExpectedRxPort = '',
- vlan_tag = flow['vlan'],
- srcMac = flow['src_mac'],
- dstMac = flow['dst_mac'],
- sourceAddr = flow['src_ip'],
- destAddr =flow['dst_ip'])
- #create port and stream block list
+ self.baseAPI.stc_config_port_location(
+ flow['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+ # create streamblock
+ flow['streamblock'] = self.baseAPI.stc_create_streamblock(
+ port_name=flow['port_handle'],
+ ExpectedRxPort='',
+ vlan_tag=flow['vlan'],
+ srcMac=flow['src_mac'],
+ dstMac=flow['dst_mac'],
+ sourceAddr=flow['src_ip'],
+ destAddr=flow['dst_ip'])
+ # create port and stream block list
port_list.append(flow['port_handle'])
streamblock_list.append(flow['streamblock'])
- #attach port
+ # attach port
self.baseAPI.stc_attach_ports(port_list)
- #start streamblock
+ # start streamblock
flag = self.baseAPI.stc_streamblock_start(streamblock_list)
# mac learning
time.sleep(2)
# stop stream block
self.baseAPI.stc_streamblock_stop(streamblock_list)
# delete streamblock and release port
- for flow in [ flowA,flowB ]:
+ for flow in [flowA, flowB]:
tmp_test_ip = flow['tester_ip']
tmp_slot = flow['send_port'].split('/')[0]
tmp_port = flow['send_port'].split('/')[1]
self.baseAPI.stc_delete(flow['streamblock'])
- self.baseAPI.stc_release('%s/%s/%s' %(tmp_test_ip,tmp_slot,tmp_port))
+ self.baseAPI.stc_release(
+ '%s/%s/%s' %
+ (tmp_test_ip, tmp_slot, tmp_port))
# delete project
self.baseAPI.stc_delete('project1')
ret = self.baseAPI.stc_perform('ResetConfig -config system1')
return True
- except :
+ except:
print("[ERROR]mac learning failed")
return False
- def stop_flow(self,streamblock_list,flow):
+ def stop_flow(self, streamblock_list, flow):
flow = eval(flow)
streamblock_list = streamblock_list.strip('\'').split(',')
- #stop streamblock list
- try :
+ # stop streamblock list
+ try:
ret = self.baseAPI.stc_streamblock_stop(streamblock_list)
- except :
+ except:
print("[ERROR]Stop the streamblock list failed.")
return False
- #delete streamblock
- try :
- for streamblock in streamblock_list :
+ # delete streamblock
+ try:
+ for streamblock in streamblock_list:
ret = self.baseAPI.stc_delete(streamblock)
- except :
+ except:
print("[ERROR]delete stream block.")
return False
- #release port
- try :
+ # release port
+ try:
slot = flow['send_port'].split('/')[0]
port = flow['send_port'].split('/')[1]
- ret = self.baseAPI.stc_release('%s/%s/%s' %(flow['tester_ip'],slot,port))
- except :
+ ret = self.baseAPI.stc_release(
+ '%s/%s/%s' %
+ (flow['tester_ip'], slot, port))
+ except:
print("[ERROR]Release port failed")
return False
- ##delete project
- try :
+ # delete project
+ try:
ret = self.baseAPI.stc_delete('project1')
ret = self.baseAPI.stc_perform('ResetConfig -config system1')
return True
- except :
+ except:
print("[ERROR]Delete project1 failed.")
return False
-
- def run_rfc2544_throughput(self,forward_init_flows,reverse_init_flows):
+
+ def run_rfc2544_throughput(self, forward_init_flows, reverse_init_flows):
#import pdb
- #pdb.set_trace()
- #rebuild the flows
+ # pdb.set_trace()
+ # rebuild the flows
forward_init_flows = eval(forward_init_flows)
reverse_init_flows = eval(reverse_init_flows)
- #stc init action
+ # stc init action
self.baseAPI.stc_perform(' ResetConfig -config system1')
self.baseAPI.stc_init()
- #create project
+ # create project
project = self.baseAPI.stc_create_project()
- #create sequencer
- seq_handle = self.baseAPI.stc_create('Sequencer -under %s' %(project))
- #create port handle
+ # create sequencer
+ seq_handle = self.baseAPI.stc_create('Sequencer -under %s' % (project))
+ # create port handle
forward_port_handle = self.baseAPI.stc_create_port(project)
reverse_port_handle = self.baseAPI.stc_create_port(project)
- #create forward flow streamblock
+ # create forward flow streamblock
for key in forward_init_flows.keys():
forward_init_flows[key]['port_handle'] = forward_port_handle
tmp_test_ip = forward_init_flows[key]['tester_ip']
- tmp_slot = forward_init_flows[key]['send_port'].split('/')[0]
- tmp_port = forward_init_flows[key]['send_port'].split('/')[1]
- self.baseAPI.stc_config_port_location(forward_init_flows[key]['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
- #create streamblock
- forward_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(port_name = forward_init_flows[key]['port_handle'],
- vlan_tag = forward_init_flows[key]['vlan'],
- ExpectedRxPort = reverse_port_handle,
- srcMac = forward_init_flows[key]['src_mac'],
- dstMac = forward_init_flows[key]['dst_mac'],
- sourceAddr = forward_init_flows[key]['src_ip'],
- destAddr = forward_init_flows[key]['dst_ip'])
- #create reverse flow streamblock
+ tmp_slot = forward_init_flows[key]['send_port'].split('/')[0]
+ tmp_port = forward_init_flows[key]['send_port'].split('/')[1]
+ self.baseAPI.stc_config_port_location(
+ forward_init_flows[key]['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+ # create streamblock
+ forward_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(
+ port_name=forward_init_flows[key]['port_handle'],
+ vlan_tag=forward_init_flows[key]['vlan'],
+ ExpectedRxPort=reverse_port_handle,
+ srcMac=forward_init_flows[key]['src_mac'],
+ dstMac=forward_init_flows[key]['dst_mac'],
+ sourceAddr=forward_init_flows[key]['src_ip'],
+ destAddr=forward_init_flows[key]['dst_ip'])
+ # create reverse flow streamblock
for key in reverse_init_flows.keys():
reverse_init_flows[key]['port_handle'] = reverse_port_handle
tmp_test_ip = reverse_init_flows[key]['tester_ip']
- tmp_slot = reverse_init_flows[key]['send_port'].split('/')[0]
- tmp_port = reverse_init_flows[key]['send_port'].split('/')[1]
- self.baseAPI.stc_config_port_location(reverse_init_flows[key]['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
- #create streamblock
- reverse_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(port_name = reverse_init_flows[key]['port_handle'],
- vlan_tag = reverse_init_flows[key]['vlan'],
- ExpectedRxPort = forward_port_handle,
- srcMac = reverse_init_flows[key]['src_mac'],
- dstMac = reverse_init_flows[key]['dst_mac'],
- sourceAddr = reverse_init_flows[key]['src_ip'],
- destAddr = reverse_init_flows[key]['dst_ip'])
- #Create the RFC 2544 throughput test
- throughput_config = self.baseAPI.stc_create('Rfc2544ThroughputConfig -under ',project,
- '-AcceptableFrameLoss 0.01',
- '-NumOfTrials 1',
- '-DurationSeconds 60',
- '-SearchMode BINARY',
- '-RateLowerLimit 1',
- '-RateUpperLimit 100',
- '-RateInitial 10',
- '-UseExistingStreamBlocks True',
- '-EnableLearning False',
- '-FrameSizeIterationMode CUSTOM',
- '-CustomFrameSizeList "70 128 256 512 1024 1280 1518"',
- '-LatencyType LIFO',
- '-EnableJitterMeasurement TRUE'
- )
+ tmp_slot = reverse_init_flows[key]['send_port'].split('/')[0]
+ tmp_port = reverse_init_flows[key]['send_port'].split('/')[1]
+ self.baseAPI.stc_config_port_location(
+ reverse_init_flows[key]['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+ # create streamblock
+ reverse_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(
+ port_name=reverse_init_flows[key]['port_handle'],
+ vlan_tag=reverse_init_flows[key]['vlan'],
+ ExpectedRxPort=forward_port_handle,
+ srcMac=reverse_init_flows[key]['src_mac'],
+ dstMac=reverse_init_flows[key]['dst_mac'],
+ sourceAddr=reverse_init_flows[key]['src_ip'],
+ destAddr=reverse_init_flows[key]['dst_ip'])
+ # Create the RFC 2544 throughput test
+ throughput_config = self.baseAPI.stc_create(
+ 'Rfc2544ThroughputConfig -under ',
+ project,
+ '-AcceptableFrameLoss 0.01',
+ '-NumOfTrials 1',
+ '-DurationSeconds 60',
+ '-SearchMode BINARY',
+ '-RateLowerLimit 1',
+ '-RateUpperLimit 100',
+ '-RateInitial 10',
+ '-UseExistingStreamBlocks True',
+ '-EnableLearning False',
+ '-FrameSizeIterationMode CUSTOM',
+ '-CustomFrameSizeList "70 128 256 512 1024 1280 1518"',
+ '-LatencyType LIFO',
+ '-EnableJitterMeasurement TRUE')
#import pdb
- #pdb.set_trace()
+ # pdb.set_trace()
# list streamblocks
streamblock_list = '" '
for key in forward_init_flows.keys():
- streamblock_list = streamblock_list+forward_init_flows[key]['streamblock']+' '
+ streamblock_list = streamblock_list + \
+ forward_init_flows[key]['streamblock'] + ' '
for key in reverse_init_flows.keys():
- streamblock_list = streamblock_list+reverse_init_flows[key]['streamblock']+' '
- streamblock_list = streamblock_list+'"'
+ streamblock_list = streamblock_list + \
+ reverse_init_flows[key]['streamblock'] + ' '
+ streamblock_list = streamblock_list + '"'
- throughput_sbProfile= self.baseAPI.stc_create('Rfc2544StreamBlockProfile -under '+throughput_config+' -Active TRUE -LocalActive TRUE')
- self.baseAPI.stc_config(throughput_sbProfile,'-StreamBlockList '+streamblock_list)
- self.baseAPI.stc_perform('ExpandBenchmarkConfigCommand','-config ',throughput_config)
+ throughput_sbProfile = self.baseAPI.stc_create(
+ 'Rfc2544StreamBlockProfile -under ' +
+ throughput_config +
+ ' -Active TRUE -LocalActive TRUE')
+ self.baseAPI.stc_config(
+ throughput_sbProfile,
+ '-StreamBlockList ' +
+ streamblock_list)
+ self.baseAPI.stc_perform(
+ 'ExpandBenchmarkConfigCommand',
+ '-config ',
+ throughput_config)
- #attach the port before testing
- port_list = [ forward_port_handle,reverse_port_handle]
+ # attach the port before testing
+ port_list = [forward_port_handle, reverse_port_handle]
self.baseAPI.stc_attach_ports(port_list)
- #stc apply and begin to sequence test
+ # stc apply and begin to sequence test
self.baseAPI.stc_apply()
self.baseAPI.stc_perform("SequencerStart")
- #wait until complete
+ # wait until complete
self.baseAPI.stc_waituntilcomplete()
-
- #get result db
- resultsdb = self.baseAPI.stc_get("system1.project.TestResultSetting", "-CurrentResultFileName")
- results_dict = self.baseAPI.stc_perform('QueryResult','-DatabaseConnectionString',resultsdb,'-ResultPath RFC2544ThroughputTestResultDetailedSummaryView')
- #print results_dict
- return True,results_dict
- def run_rfc2544_frameloss(self,forward_init_flows,reverse_init_flows):
+ # get result db
+ resultsdb = self.baseAPI.stc_get(
+ "system1.project.TestResultSetting",
+ "-CurrentResultFileName")
+ results_dict = self.baseAPI.stc_perform(
+ 'QueryResult',
+ '-DatabaseConnectionString',
+ resultsdb,
+ '-ResultPath RFC2544ThroughputTestResultDetailedSummaryView')
+ # print results_dict
+ return True, results_dict
+
+ def run_rfc2544_frameloss(self, forward_init_flows, reverse_init_flows):
#import pdb
- #pdb.set_trace()
- #rebuild the flows
+ # pdb.set_trace()
+ # rebuild the flows
forward_init_flows = eval(forward_init_flows)
reverse_init_flows = eval(reverse_init_flows)
- #stc init action
+ # stc init action
self.baseAPI.stc_perform(' ResetConfig -config system1')
self.baseAPI.stc_init()
- #create project
+ # create project
project = self.baseAPI.stc_create_project()
- #create sequencer
- seq_handle = self.baseAPI.stc_create('Sequencer -under %s' %(project))
- #create port handle
+ # create sequencer
+ seq_handle = self.baseAPI.stc_create('Sequencer -under %s' % (project))
+ # create port handle
forward_port_handle = self.baseAPI.stc_create_port(project)
reverse_port_handle = self.baseAPI.stc_create_port(project)
- #create forward flow streamblock
+ # create forward flow streamblock
for key in forward_init_flows.keys():
forward_init_flows[key]['port_handle'] = forward_port_handle
tmp_test_ip = forward_init_flows[key]['tester_ip']
- tmp_slot = forward_init_flows[key]['send_port'].split('/')[0]
- tmp_port = forward_init_flows[key]['send_port'].split('/')[1]
- self.baseAPI.stc_config_port_location(forward_init_flows[key]['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
- #create streamblock
- forward_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(port_name = forward_init_flows[key]['port_handle'],
- vlan_tag = forward_init_flows[key]['vlan'],
- ExpectedRxPort = reverse_port_handle,
- srcMac = forward_init_flows[key]['src_mac'],
- dstMac = forward_init_flows[key]['dst_mac'],
- sourceAddr = forward_init_flows[key]['src_ip'],
- destAddr = forward_init_flows[key]['dst_ip'])
- #create reverse flow streamblock
+ tmp_slot = forward_init_flows[key]['send_port'].split('/')[0]
+ tmp_port = forward_init_flows[key]['send_port'].split('/')[1]
+ self.baseAPI.stc_config_port_location(
+ forward_init_flows[key]['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+ # create streamblock
+ forward_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(
+ port_name=forward_init_flows[key]['port_handle'],
+ vlan_tag=forward_init_flows[key]['vlan'],
+ ExpectedRxPort=reverse_port_handle,
+ srcMac=forward_init_flows[key]['src_mac'],
+ dstMac=forward_init_flows[key]['dst_mac'],
+ sourceAddr=forward_init_flows[key]['src_ip'],
+ destAddr=forward_init_flows[key]['dst_ip'])
+ # create reverse flow streamblock
for key in reverse_init_flows.keys():
reverse_init_flows[key]['port_handle'] = reverse_port_handle
tmp_test_ip = reverse_init_flows[key]['tester_ip']
- tmp_slot = reverse_init_flows[key]['send_port'].split('/')[0]
- tmp_port = reverse_init_flows[key]['send_port'].split('/')[1]
- self.baseAPI.stc_config_port_location(reverse_init_flows[key]['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
- #create streamblock
- reverse_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(port_name = reverse_init_flows[key]['port_handle'],
- vlan_tag = reverse_init_flows[key]['vlan'],
- ExpectedRxPort = forward_port_handle,
- srcMac = reverse_init_flows[key]['src_mac'],
- dstMac = reverse_init_flows[key]['dst_mac'],
- sourceAddr = reverse_init_flows[key]['src_ip'],
- destAddr = reverse_init_flows[key]['dst_ip'])
- #Create the RFC 2544 frameloss test
- frameloss_config = self.baseAPI.stc_create('Rfc2544FrameLossConfig -under ',project,
- '-NumOfTrials 1 ',
- '-DurationSeconds 60 ',
- '-LoadUnits PERCENT_LINE_RATE ',
- '-LoadType CUSTOM '
- '-CustomLoadList 100 '
- '-UseExistingStreamBlocks True ',
- '-EnableLearning False ',
- '-FrameSizeIterationMode CUSTOM ',
- '-CustomFrameSizeList "70 128 256 512 1024 1280 1518"',
- '-LatencyType LIFO',
- '-EnableJitterMeasurement TRUE'
- )
+ tmp_slot = reverse_init_flows[key]['send_port'].split('/')[0]
+ tmp_port = reverse_init_flows[key]['send_port'].split('/')[1]
+ self.baseAPI.stc_config_port_location(
+ reverse_init_flows[key]['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+ # create streamblock
+ reverse_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(
+ port_name=reverse_init_flows[key]['port_handle'],
+ vlan_tag=reverse_init_flows[key]['vlan'],
+ ExpectedRxPort=forward_port_handle,
+ srcMac=reverse_init_flows[key]['src_mac'],
+ dstMac=reverse_init_flows[key]['dst_mac'],
+ sourceAddr=reverse_init_flows[key]['src_ip'],
+ destAddr=reverse_init_flows[key]['dst_ip'])
+ # Create the RFC 2544 frameloss test
+ frameloss_config = self.baseAPI.stc_create(
+ 'Rfc2544FrameLossConfig -under ',
+ project,
+ '-NumOfTrials 1 ',
+ '-DurationSeconds 60 ',
+ '-LoadUnits PERCENT_LINE_RATE ',
+ '-LoadType CUSTOM '
+ '-CustomLoadList 100 '
+ '-UseExistingStreamBlocks True ',
+ '-EnableLearning False ',
+ '-FrameSizeIterationMode CUSTOM ',
+ '-CustomFrameSizeList "70 128 256 512 1024 1280 1518"',
+ '-LatencyType LIFO',
+ '-EnableJitterMeasurement TRUE')
#import pdb
- #pdb.set_trace()
+ # pdb.set_trace()
# list streamblocks
streamblock_list = '" '
for key in forward_init_flows.keys():
- streamblock_list = streamblock_list+forward_init_flows[key]['streamblock']+' '
+ streamblock_list = streamblock_list + \
+ forward_init_flows[key]['streamblock'] + ' '
for key in reverse_init_flows.keys():
- streamblock_list = streamblock_list+reverse_init_flows[key]['streamblock']+' '
- streamblock_list = streamblock_list+'"'
+ streamblock_list = streamblock_list + \
+ reverse_init_flows[key]['streamblock'] + ' '
+ streamblock_list = streamblock_list + '"'
- frameloss_sbProfile= self.baseAPI.stc_create('Rfc2544StreamBlockProfile -under '+frameloss_config+' -Active TRUE -LocalActive TRUE')
- self.baseAPI.stc_config(frameloss_sbProfile,'-StreamBlockList '+streamblock_list)
- self.baseAPI.stc_perform('ExpandBenchmarkConfigCommand','-config ',frameloss_config)
+ frameloss_sbProfile = self.baseAPI.stc_create(
+ 'Rfc2544StreamBlockProfile -under ' +
+ frameloss_config +
+ ' -Active TRUE -LocalActive TRUE')
+ self.baseAPI.stc_config(
+ frameloss_sbProfile,
+ '-StreamBlockList ' +
+ streamblock_list)
+ self.baseAPI.stc_perform(
+ 'ExpandBenchmarkConfigCommand',
+ '-config ',
+ frameloss_config)
- #attach the port before testing
- port_list = [ forward_port_handle,reverse_port_handle]
+ # attach the port before testing
+ port_list = [forward_port_handle, reverse_port_handle]
self.baseAPI.stc_attach_ports(port_list)
- #stc apply and begin to sequence test
+ # stc apply and begin to sequence test
self.baseAPI.stc_apply()
self.baseAPI.stc_perform("SequencerStart")
- #wait until complete
+ # wait until complete
self.baseAPI.stc_waituntilcomplete()
- #get result db
- resultsdb = self.baseAPI.stc_get("system1.project.TestResultSetting", "-CurrentResultFileName")
- results_dict = self.baseAPI.stc_perform('QueryResult','-DatabaseConnectionString',resultsdb,'-ResultPath RFC2544FrameLossTestResultDetailedSummaryView')
+ # get result db
+ resultsdb = self.baseAPI.stc_get(
+ "system1.project.TestResultSetting",
+ "-CurrentResultFileName")
+ results_dict = self.baseAPI.stc_perform(
+ 'QueryResult',
+ '-DatabaseConnectionString',
+ resultsdb,
+ '-ResultPath RFC2544FrameLossTestResultDetailedSummaryView')
#import pdb
- #pdb.set_trace()
- return True,results_dict
+ # pdb.set_trace()
+ return True, results_dict
- def run_rfc2544_latency(self,forward_init_flows,reverse_init_flows):
+ def run_rfc2544_latency(self, forward_init_flows, reverse_init_flows):
pass
-
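
send_packet(), mac_learning() and the rfc2544 runners above all receive flows as string-encoded dicts and rebuild them with eval(). A hedged illustration of the shape such a flow takes (key names come from the code above, the values are placeholders):

    # Hypothetical flow definition; only the key names are taken from the code.
    flow = {
        'tester_ip': '192.168.1.100',   # Spirent chassis address
        'send_port': '1/1',             # "slot/port", split on '/' by send_packet
        'vlan': None,                   # or a VLAN id string such as '100'
        'src_mac': '00:00:00:00:00:01',
        'dst_mac': '00:00:00:00:00:02',
        'src_ip': '10.0.0.1',
        'dst_ip': '10.0.0.2',
    }

    # send_packet() gets the dict serialized as a string and eval()s it back:
    # agent = Spirent_Tools()
    # streamblocks = agent.send_packet(str(flow))
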
diff --git a/testsuites/vstf/vstf_scripts/vstf/agent/spirentagent.py b/testsuites/vstf/vstf_scripts/vstf/agent/spirentagent.py
index 8951f96d..46583dfe 100644
--- a/testsuites/vstf/vstf_scripts/vstf/agent/spirentagent.py
+++ b/testsuites/vstf/vstf_scripts/vstf/agent/spirentagent.py
@@ -12,5 +12,6 @@ from vstf.agent.spirent.tools import Spirent_Tools as Spirent
class agentSpirent(Spirent):
+
def __init__(self):
super(agentSpirent, self).__init__()
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/__init__.py b/testsuites/vstf/vstf_scripts/vstf/common/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/candy_text.py b/testsuites/vstf/vstf_scripts/vstf/common/candy_text.py
index 818ae767..306d9124 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/candy_text.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/candy_text.py
@@ -36,7 +36,7 @@ def dict2text(info):
def text2dict(candy):
- tmp = candy.replace("##","#").split("#")
+ tmp = candy.replace("##", "#").split("#")
result = {
"sn": int(tmp[0]),
"node": tmp[1],
@@ -49,7 +49,7 @@ def text2dict(candy):
def text2tuple(candy):
- tmp = candy.replace("##","#").split("#")
+ tmp = candy.replace("##", "#").split("#")
sn = int(tmp[0])
node = tmp[1]
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/cfgparser.py b/testsuites/vstf/vstf_scripts/vstf/common/cfgparser.py
index 9de5a2cd..802cb214 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/cfgparser.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/cfgparser.py
@@ -12,6 +12,7 @@ from oslo.config import cfg
class CfgParser(object):
+
def __init__(self, config_file):
super(CfgParser, self).__init__()
if os.path.isfile(config_file) is False:
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/cliutil.py b/testsuites/vstf/vstf_scripts/vstf/common/cliutil.py
index 541bba7b..8997cd6a 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/cliutil.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/cliutil.py
@@ -7,6 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+
def arg(*args, **kwargs):
"""Decorator for CLI args.
@@ -33,4 +34,4 @@ def add_arg(func, *args, **kwargs):
if (args, kwargs) not in func.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
-    func.arguments.insert(0, (args, kwargs))
\ No newline at end of file
+ func.arguments.insert(0, (args, kwargs))
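
The arg()/add_arg() pair above builds the argument list that VstfParser later feeds to argparse subcommands. A small usage sketch in the style of the test_func.py example further down in this diff (the command name and option are illustrative only):

    # Hypothetical command module; option and function names are placeholders.
    from vstf.common import cliutil as util

    @util.arg("--host",
              help="host the command should target")
    def do_ping(args):
        """send a ping request to the given host"""
        print "ping " + args.host
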
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/cmds.py b/testsuites/vstf/vstf_scripts/vstf/common/cmds.py
index 2952be2c..f348a804 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/cmds.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/cmds.py
@@ -20,7 +20,9 @@ def execute(cmd=None, care_result=True):
try:
(status, ret) = commands.getstatusoutput(cmd)
if care_result and 0 != status:
- LOG.error('CMD<%(cmd)s> \nSTDOUT:\n%(ret)s.', {'cmd':cmd, 'ret':ret})
+ LOG.error(
+ 'CMD<%(cmd)s> \nSTDOUT:\n%(ret)s.', {
+ 'cmd': cmd, 'ret': ret})
return None
else:
return ret
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/daemon.py b/testsuites/vstf/vstf_scripts/vstf/common/daemon.py
index 35933dad..46087493 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/daemon.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/daemon.py
@@ -7,7 +7,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import sys, os, time, atexit
+import sys
+import os
+import time
+import atexit
import logging
from signal import SIGTERM
@@ -17,11 +20,16 @@ LOG = logging.getLogger(__name__)
class Daemon(object):
"""
A generic daemon class.
-
+
Usage: subclass the Daemon class and override the run() method
"""
- def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
+ def __init__(
+ self,
+ pidfile,
+ stdin='/dev/null',
+ stdout='/dev/null',
+ stderr='/dev/null'):
super(Daemon, self).__init__()
self.stdin = stdin
self.stdout = stdout
@@ -30,7 +38,7 @@ class Daemon(object):
def daemonize(self):
"""
- do the UNIX double-fork magic, see Stevens' "Advanced
+ do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
@@ -38,7 +46,7 @@ class Daemon(object):
pid = os.fork()
if pid > 0:
sys.exit(0)
- except OSError, e:
+ except OSError as e:
LOG.error("fork #1 failed: %(errno)s, %(strerror)s",
{'errno': e.errno, 'strerror': e.strerror})
sys.exit(1)
@@ -54,7 +62,7 @@ class Daemon(object):
if pid > 0:
# exit from second parent
sys.exit(0)
- except OSError, e:
+ except OSError as e:
LOG.error("fork #1 failed: %(errno)s, %(strerror)s",
{'errno': e.errno, 'strerror': e.strerror})
sys.exit(1)
@@ -116,12 +124,12 @@ class Daemon(object):
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
- # Try killing the daemon process
+ # Try killing the daemon process
try:
- while 1:
+ while True:
os.kill(pid, SIGTERM)
time.sleep(0.1)
- except OSError, err:
+ except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
@@ -139,16 +147,16 @@ class Daemon(object):
def run(self):
"""
- You should override this method when you subclass Daemon.
+ You should override this method when you subclass Daemon.
It will be called after the process has been
daemonized by start() or restart().
-
+
"""
pass
def daemon_die(self):
"""You should override this method when you shutdown daemon
this func will be call by stop() before kill the process
-
+
"""
pass
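
As the Daemon docstring says, the class is used by subclassing it and overriding run(). A minimal usage sketch under that assumption (the class name, pidfile path and loop body are invented):

    # Hypothetical subclass; only Daemon, start/stop/restart and daemon_die
    # come from the module above.
    import time

    from vstf.common.daemon import Daemon

    class HeartbeatDaemon(Daemon):

        def run(self):
            # Runs in the background process after daemonize().
            while True:
                time.sleep(10)

        def daemon_die(self):
            # Called by stop() before the process is killed.
            pass

    # d = HeartbeatDaemon('/var/run/heartbeat.pid')
    # d.start()   # later: d.stop() or d.restart()
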
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/decorator.py b/testsuites/vstf/vstf_scripts/vstf/common/decorator.py
index 98d539f1..ed910556 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/decorator.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/decorator.py
@@ -17,7 +17,9 @@ def check(key, choices=[], defaults=_DEFAULTS):
if defaults != _DEFAULTS:
kwargs[key] = defaults
else:
- raise Exception("Error: '%s' is needed in %s" % (key, func))
+ raise Exception(
+ "Error: '%s' is needed in %s" %
+ (key, func))
if choices and kwargs[key] not in choices:
raise Exception("Error: %s :%s" % (key, kwargs[key]))
@@ -40,7 +42,9 @@ def dcheck(key, choices=[]):
values = None
if isinstance(values, dict):
if key not in values:
- raise Exception("Error: '%s' is needed in %s" % (key, func))
+ raise Exception(
+ "Error: '%s' is needed in %s" %
+ (key, func))
if choices and values[key] not in choices:
raise Exception("Error: %s :%s" % (key, values[key]))
ret = func(*args)
@@ -84,7 +88,8 @@ def namespace():
ret = func(*args, **kwargs)
nspace = kwargs.get("namespace", None)
if nspace:
- ret = "ip netns exec %(namespace)s " % {"namespace": nspace} + ret
+ ret = "ip netns exec %(namespace)s " % {
+ "namespace": nspace} + ret
return ret
return __deco
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/log.py b/testsuites/vstf/vstf_scripts/vstf/common/log.py
index 415b003a..b8b64888 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/log.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/log.py
@@ -34,7 +34,10 @@ def _init_log(log_file, level=logging.INFO, clevel=logging.INFO):
return file_handler, console
-def setup_logging(level=logging.INFO, log_file="/var/log/esp_test.log", clevel=logging.WARNING):
+def setup_logging(
+ level=logging.INFO,
+ log_file="/var/log/esp_test.log",
+ clevel=logging.WARNING):
log = logging.getLogger()
log.setLevel(level)
file_handler, console = _init_log(log_file, level, clevel)
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/pyhtml.py b/testsuites/vstf/vstf_scripts/vstf/common/pyhtml.py
index f3adee8d..b2162290 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/pyhtml.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/pyhtml.py
@@ -14,22 +14,124 @@ doc_type = '<!DOCTYPE HTML>\n'
default_title = "Html Page"
charset = '<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />\n'
-html4_tags = {'a', 'abbr', 'acronym', 'address', 'area', 'b', 'base', 'bdo', 'big',
- 'blockquote', 'body', 'br', 'button', 'caption', 'cite', 'code', 'col',
- 'colgroup', 'dd', 'del', 'div', 'dfn', 'dl', 'dt', 'em', 'fieldset',
- 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head',
- 'hr', 'html', 'i', 'iframe', 'img', 'input', 'ins', 'kbd',
- 'label', 'legend', 'li', 'link', 'map', 'menu', 'menuitem', 'meta',
- 'noframes', 'noscript', 'object', 'ol', 'optgroup', 'option', 'p',
- 'param', 'pre', 'q', 'samp', 'script', 'select', 'small', 'span', 'strong',
- 'style', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
- 'thead', 'title', 'tr', 'tt', 'ul', 'var'}
+html4_tags = {
+ 'a',
+ 'abbr',
+ 'acronym',
+ 'address',
+ 'area',
+ 'b',
+ 'base',
+ 'bdo',
+ 'big',
+ 'blockquote',
+ 'body',
+ 'br',
+ 'button',
+ 'caption',
+ 'cite',
+ 'code',
+ 'col',
+ 'colgroup',
+ 'dd',
+ 'del',
+ 'div',
+ 'dfn',
+ 'dl',
+ 'dt',
+ 'em',
+ 'fieldset',
+ 'form',
+ 'frame',
+ 'frameset',
+ 'h1',
+ 'h2',
+ 'h3',
+ 'h4',
+ 'h5',
+ 'h6',
+ 'head',
+ 'hr',
+ 'html',
+ 'i',
+ 'iframe',
+ 'img',
+ 'input',
+ 'ins',
+ 'kbd',
+ 'label',
+ 'legend',
+ 'li',
+ 'link',
+ 'map',
+ 'menu',
+ 'menuitem',
+ 'meta',
+ 'noframes',
+ 'noscript',
+ 'object',
+ 'ol',
+ 'optgroup',
+ 'option',
+ 'p',
+ 'param',
+ 'pre',
+ 'q',
+ 'samp',
+ 'script',
+ 'select',
+ 'small',
+ 'span',
+ 'strong',
+ 'style',
+ 'sub',
+ 'sup',
+ 'table',
+ 'tbody',
+ 'td',
+ 'textarea',
+ 'tfoot',
+ 'th',
+ 'thead',
+ 'title',
+ 'tr',
+ 'tt',
+ 'ul',
+ 'var'}
disused_tags = {'isindex', 'font', 'dir', 's', 'strike',
'u', 'center', 'basefont', 'applet', 'xmp'}
-html5_tags = {'article', 'aside', 'audio', 'bdi', 'canvas', 'command', 'datalist', 'details',
- 'dialog', 'embed', 'figcaption', 'figure', 'footer', 'header',
- 'keygen', 'mark', 'meter', 'nav', 'output', 'progress', 'rp', 'rt', 'ruby',
- 'section', 'source', 'summary', 'details', 'time', 'track', 'video', 'wbr'}
+html5_tags = {
+ 'article',
+ 'aside',
+ 'audio',
+ 'bdi',
+ 'canvas',
+ 'command',
+ 'datalist',
+ 'details',
+ 'dialog',
+ 'embed',
+ 'figcaption',
+ 'figure',
+ 'footer',
+ 'header',
+ 'keygen',
+ 'mark',
+ 'meter',
+ 'nav',
+ 'output',
+ 'progress',
+ 'rp',
+ 'rt',
+ 'ruby',
+ 'section',
+ 'source',
+ 'summary',
+ 'details',
+ 'time',
+ 'track',
+ 'video',
+ 'wbr'}
nl = '\n'
tags = html4_tags | disused_tags | html5_tags
@@ -105,7 +207,8 @@ class Tag(list):
result = ''
if self.tag_name:
result += '<%s%s%s>' % (self.tag_name,
- self._render_attr(), self._self_close() * ' /')
+ self._render_attr(),
+ self._self_close() * ' /')
if not self._self_close():
isnl = True
for c in self:
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/rsync.py b/testsuites/vstf/vstf_scripts/vstf/common/rsync.py
index 2209dfd3..03331368 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/rsync.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/rsync.py
@@ -10,7 +10,15 @@
# from __future__ import nested_scopes
-import os, os.path, shutil, glob, re, sys, getopt, stat, string
+import os
+import os.path
+import shutil
+import glob
+import re
+import sys
+import getopt
+import stat
+import string
try:
import win32file
@@ -19,6 +27,7 @@ except:
class Cookie:
+
def __init__(self):
self.sink_root = ""
self.target_root = ""
@@ -45,7 +54,7 @@ class Cookie:
def visit(cookie, dirname, names):
"""Copy files names from sink_root + (dirname - sink_root) to target_root + (dirname - sink_root)"""
if os.path.split(cookie.sink_root)[
- 1]: # Should be tested with (C:\Cvs -> C:\)! (C:\Archives\MyDatas\UltraEdit -> C:\Archives\MyDatas) (Cvs -> "")! (Archives\MyDatas\UltraEdit -> Archives\MyDatas) (\Cvs -> \)! (\Archives\MyDatas\UltraEdit -> Archives\MyDatas)
+ 1]: # Should be tested with (C:\Cvs -> C:\)! (C:\Archives\MyDatas\UltraEdit -> C:\Archives\MyDatas) (Cvs -> "")! (Archives\MyDatas\UltraEdit -> Archives\MyDatas) (\Cvs -> \)! (\Archives\MyDatas\UltraEdit -> Archives\MyDatas)
dirname = dirname[len(cookie.sink_root) + 1:]
else:
dirname = dirname[len(cookie.sink_root):]
@@ -81,7 +90,9 @@ def visit(cookie, dirname, names):
elif os.path.isdir(sink):
removeDir(cookie, sink)
else:
- logError("Sink %s is neither a file nor a folder (skip removal)" % sink)
+ logError(
+ "Sink %s is neither a file nor a folder (skip removal)" %
+ sink)
names_excluded += [names[name_index]]
del (names[name_index])
name_index = name_index - 1
@@ -95,7 +106,7 @@ def visit(cookie, dirname, names):
for name in os.listdir(target_dir):
if not cookie.delete_excluded and name in names_excluded:
continue
- if not name in names:
+ if name not in names:
target = os.path.join(target_dir, name)
if os.path.isfile(target):
removeFile(cookie, target)
@@ -122,7 +133,9 @@ def visit(cookie, dirname, names):
copyFile(cookie, sink, target)
else:
# file-???
- logError("Target %s is neither a file nor folder (skip update)" % sink)
+ logError(
+ "Target %s is neither a file nor folder (skip update)" %
+ sink)
elif os.path.isdir(sink):
if os.path.isfile(target):
@@ -131,7 +144,9 @@ def visit(cookie, dirname, names):
makeDir(cookie, target)
else:
# ???-xxx
- logError("Sink %s is neither a file nor a folder (skip update)" % sink)
+ logError(
+ "Sink %s is neither a file nor a folder (skip update)" %
+ sink)
elif not cookie.existing:
# When target dont exist:
@@ -142,7 +157,9 @@ def visit(cookie, dirname, names):
# folder
makeDir(cookie, target)
else:
- logError("Sink %s is neither a file nor a folder (skip update)" % sink)
+ logError(
+ "Sink %s is neither a file nor a folder (skip update)" %
+ sink)
def log(cookie, message):
@@ -166,7 +183,9 @@ def shouldUpdate(cookie, sink, target):
sink_sz = sink_st.st_size
sink_mt = sink_st.st_mtime
except:
- logError("Fail to retrieve information about sink %s (skip update)" % sink)
+ logError(
+ "Fail to retrieve information about sink %s (skip update)" %
+ sink)
return 0
try:
@@ -174,7 +193,9 @@ def shouldUpdate(cookie, sink, target):
target_sz = target_st.st_size
target_mt = target_st.st_mtime
except:
- logError("Fail to retrieve information about target %s (skip update)" % target)
+ logError(
+ "Fail to retrieve information about target %s (skip update)" %
+ target)
return 0
if cookie.update:
@@ -203,7 +224,7 @@ def copyFile(cookie, sink, target):
if cookie.time:
try:
s = os.stat(sink)
- os.utime(target, (s.st_atime, s.st_mtime));
+ os.utime(target, (s.st_atime, s.st_mtime))
except:
logError("Fail to copy timestamp of %s" % sink)
@@ -216,8 +237,9 @@ def updateFile(cookie, sink, target):
try:
if win32file:
filemode = win32file.GetFileAttributesW(target)
- win32file.SetFileAttributesW(target,
- filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
+ win32file.SetFileAttributesW(
+ target,
+ filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
else:
os.chmod(target, stat.S_IWUSR)
except:
@@ -228,10 +250,11 @@ def updateFile(cookie, sink, target):
if cookie.time:
try:
s = os.stat(sink)
- os.utime(target, (s.st_atime, s.st_mtime));
+ os.utime(target, (s.st_atime, s.st_mtime))
except:
- logError(
- "Fail to copy timestamp of %s" % sink) # The utime api of the 2.3 version of python is not unicode compliant.
+ # The utime api of the 2.3 version of python is not unicode
+ # compliant.
+ logError("Fail to copy timestamp of %s" % sink)
except:
logError("Fail to override %s" % sink)
@@ -242,8 +265,8 @@ def updateFile(cookie, sink, target):
def prepareRemoveFile(path):
if win32file:
filemode = win32file.GetFileAttributesW(path)
- win32file.SetFileAttributesW(path,
- filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
+ win32file.SetFileAttributesW(path, filemode & ~win32file.FILE_ATTRIBUTE_READONLY &
+ ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
else:
os.chmod(path, stat.S_IWUSR)
@@ -305,7 +328,8 @@ def convertPath(path):
if separator != "/":
path = re.sub(re.escape(separator), "/", path)
- # Help file, folder pattern to express that it should match the all file or folder name.
+    # Help file, folder pattern to express that it should match the whole file
+    # or folder name.
path = "/" + path
return path
@@ -360,7 +384,7 @@ def convertPatterns(path, sign):
"""Read the files for pattern and return a vector of filters"""
filters = []
f = open(path, "r")
- while 1:
+ while True:
pattern = f.readline()
if not pattern:
break
@@ -428,8 +452,8 @@ def main(args):
cookie.relative = 1
elif o in ["-n", "--dry-run"]:
cookie.dry_run = 1
- elif o in ["-t", "--times",
- "--time"]: # --time is there to guaranty backward compatibility with previous buggy version.
+            # --time is there to guarantee backward compatibility with a previous buggy version.
+ elif o in ["-t", "--times", "--time"]:
cookie.time = 1
elif o in ["-u", "--update"]:
cookie.update = 1
@@ -474,7 +498,7 @@ def main(args):
target_root = args[1]
try: # In order to allow compatibility below 2.3.
pass
- if os.path.__dict__.has_key("supports_unicode_filenames") and os.path.supports_unicode_filenames:
+ if "supports_unicode_filenames" in os.path.__dict__ and os.path.supports_unicode_filenames:
target_root = unicode(target_root, sys.getfilesystemencoding())
finally:
cookie.target_root = target_root
@@ -486,7 +510,7 @@ def main(args):
sink_families = {}
for sink in sinks:
try: # In order to allow compatibility below 2.3.
- if os.path.__dict__.has_key("supports_unicode_filenames") and os.path.supports_unicode_filenames:
+ if "supports_unicode_filenames" in os.path.__dict__ and os.path.supports_unicode_filenames:
sink = unicode(sink, sys.getfilesystemencoding())
except:
pass
@@ -499,7 +523,7 @@ def main(args):
break
sink_root, sink_name = os.path.split(sink_root)
sink_root = sink_drive + sink_root
- if not sink_families.has_key(sink_root):
+ if sink_root not in sink_families:
sink_families[sink_root] = []
sink_families[sink_root] = sink_families[sink_root] + [sink_name]
@@ -509,15 +533,28 @@ def main(args):
else:
cookie.sink_root = sink_root
- global y # In order to allow compatibility below 2.1 (nested scope where used before).
+        # In order to allow compatibility below 2.1 (nested scopes were used
+        # before).
+ global y
y = sink_root
- files = filter(lambda x: os.path.isfile(os.path.join(y, x)), sink_families[sink_root])
+ files = filter(
+ lambda x: os.path.isfile(
+ os.path.join(
+ y,
+ x)),
+ sink_families[sink_root])
if files:
visit(cookie, sink_root, files)
- # global y # In order to allow compatibility below 2.1 (nested scope where used before).
+        # global y # In order to allow compatibility below 2.1 (nested scopes
+        # were used before).
y = sink_root
- folders = filter(lambda x: os.path.isdir(os.path.join(y, x)), sink_families[sink_root])
+ folders = filter(
+ lambda x: os.path.isdir(
+ os.path.join(
+ y,
+ x)),
+ sink_families[sink_root])
for folder in folders:
folder_path = os.path.join(sink_root, folder)
if not cookie.recursive:
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/saltstack.py b/testsuites/vstf/vstf_scripts/vstf/common/saltstack.py
index 030bef5d..96bdc911 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/saltstack.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/saltstack.py
@@ -32,14 +32,16 @@ class Mysalt(object):
cmds.execute("grep '^pillar_roots' \
/etc/salt/master -A 2 | sed 1,2d | awk '{print $2}'") + '/')
if self.pillar_path == "":
- log.warning("pillar path not found, make sure the pillar_roots configed")
+ log.warning(
+                "pillar path not found, make sure pillar_roots is configured")
else:
os.system("mkdir -p " + self.pillar_path)
self.state_path = str(cmds.execute("grep '^file_roots' \
/etc/salt/master -A 2 | sed 1,2d | awk '{print $2}'") + '/')
if self.state_path == "":
- log.warning("state path not found, make sure the file_roots configed")
+ log.warning(
+                "state path not found, make sure file_roots is configured")
else:
os.system("mkdir -p " + self.state_path)
@@ -72,7 +74,8 @@ class Mysalt(object):
elif flag == "state":
dst = self.state_path
else:
- log.error("this file or dir not pillar or state, can not support now.")
+ log.error(
+                "this file or dir is neither pillar nor state, not supported now.")
return False
if self.IS_FILE == self.__is_dir_or_file(target):
@@ -125,20 +128,27 @@ class Mysalt(object):
num_s += 1
else:
num_f += 1
- msg = msg + self.__luxuriant_line("Failed %d:\n" % num_f, "red")
+ msg = msg + \
+ self.__luxuriant_line("Failed %d:\n" % num_f, "red")
msg = msg + "\t" + key + '\n'
- msg = msg + self.__luxuriant_line("\t%s\n" % ret[host][key]['comment'], "red")
- if True == ret[host][key]['changes'].has_key('retcode'):
- msg = msg + "RETCODE: %s\n" % (ret[host][key]['changes']['retcode'])
- if True == ret[host][key]['changes'].has_key('stderr'):
- msg = msg + "STDERR: %s\n" % (ret[host][key]['changes']['stderr'])
- if True == ret[host][key]['changes'].has_key('stdout'):
- msg = msg + "STDOUT: %s\n" % (ret[host][key]['changes']['stdout'])
- msg = msg + self.__luxuriant_line("total success: %d\n" % num_s, "green")
+ msg = msg + \
+ self.__luxuriant_line("\t%s\n" % ret[host][key]['comment'], "red")
+ if True == ('retcode' in ret[host][key]['changes']):
+ msg = msg + \
+ "RETCODE: %s\n" % (ret[host][key]['changes']['retcode'])
+ if True == ('stderr' in ret[host][key]['changes']):
+ msg = msg + \
+ "STDERR: %s\n" % (ret[host][key]['changes']['stderr'])
+ if True == ('stdout' in ret[host][key]['changes']):
+ msg = msg + \
+ "STDOUT: %s\n" % (ret[host][key]['changes']['stdout'])
+ msg = msg + \
+ self.__luxuriant_line("total success: %d\n" % num_s, "green")
msg = msg + self.__luxuriant_line("failed: %d\n" % num_f, "red")
except Exception as e:
- log.error("sorry, thy to check result happend error, <%(e)s>.\nret:%(ret)s",
- {'e': e, 'ret': ret})
+ log.error(
+                "sorry, an error happened while checking the result, <%(e)s>.\nret:%(ret)s", {
+ 'e': e, 'ret': ret})
return -1
log.info(':\n' + msg)
return num_f
@@ -147,7 +157,9 @@ class Mysalt(object):
try:
log.info("salt " + host + " state.sls " +
fstate + ' pillar=\'' + str(ext_pillar) + '\'')
- ret = self.salt.cmd(host, 'state.sls', [fstate, 'pillar=' + str(ext_pillar)], 180, 'list')
+ ret = self.salt.cmd(
+ host, 'state.sls', [
+ fstate, 'pillar=' + str(ext_pillar)], 180, 'list')
except Exception as e:
log.error("try to init host %(host)s happend error: <%(e)s>.",
{'host': host, 'e': e})
@@ -170,7 +182,7 @@ class Mysalt(object):
return ret
def copy_by_state(self, host, src, state_cmd, **kwargs):
- '''the src must be a dir, and the state.sls
+ '''the src must be a dir, and the state.sls
             must be the name of the dir'''
if not self.slave_exists(host):
@@ -184,10 +196,12 @@ class Mysalt(object):
def get_master_ip(self, host=None):
if not host:
- ret = cmds.execute("grep '^interface:' /etc/salt/master | awk '{print $2}'").strip()
+ ret = cmds.execute(
+ "grep '^interface:' /etc/salt/master | awk '{print $2}'").strip()
return ret
try:
- ret = self.salt.cmd(host, "grains.item", ["master"])[host]['master']
+ ret = self.salt.cmd(host, "grains.item", ["master"])[
+ host]['master']
except Exception:
log.error("salt happened error when get master ip")
return ""
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/ssh.py b/testsuites/vstf/vstf_scripts/vstf/common/ssh.py
index 7b85e086..5cf196d4 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/ssh.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/ssh.py
@@ -19,6 +19,7 @@ LOG = logging.getLogger(__name__)
class SSHClientContext(paramiko.SSHClient):
+
def __init__(self, ip, user, passwd, port=22):
self.host = ip
self.user = user
@@ -31,11 +32,20 @@ class SSHClientContext(paramiko.SSHClient):
ret = stdout.channel.recv_exit_status()
out = stdout.read().strip()
err = stderr.read().strip()
- LOG.info("in %s,%s,return:%s,output:%s:error:%s" % (self.host, cmd, ret, out, err))
+ LOG.info(
+ "in %s,%s,return:%s,output:%s:error:%s" %
+ (self.host, cmd, ret, out, err))
return ret, out, err
def connect(self):
- super(SSHClientContext, self).connect(self.host, self.port, self.user, self.passwd, timeout=10)
+ super(
+ SSHClientContext,
+ self).connect(
+ self.host,
+ self.port,
+ self.user,
+ self.passwd,
+ timeout=10)
def __enter__(self):
self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
@@ -48,6 +58,7 @@ class SSHClientContext(paramiko.SSHClient):
class SFTPClientContext(object):
+
def __init__(self, ip, user, passwd, port=22):
self.host = ip
self.passwd = passwd
@@ -97,7 +108,9 @@ def upload_dir(host, user, passwd, local_dir, remote_dir):
remote_dir = os.path.join(remote_dir, os.path.basename(local_dir))
ret, _, _ = run_cmd(host, user, passwd, "sudo rm -rf %s" % remote_dir)
if ret != 0 and ret != 1:
- LOG.error("somehow failed in rm -rf %s on host:%s,return:%s" % (remote_dir, host, ret))
+ LOG.error(
+ "somehow failed in rm -rf %s on host:%s,return:%s" %
+ (remote_dir, host, ret))
exit(1)
with SFTPClientContext(host, user, passwd) as sftp:
sftp.connect()
@@ -117,7 +130,7 @@ def upload_dir(host, user, passwd, local_dir, remote_dir):
try:
sftp.mkdir(remote_path)
LOG.info("mkdir path %s" % remote_path)
- except Exception, e:
+ except Exception as e:
raise
return remote_dir
@@ -177,7 +190,9 @@ def download_dir(host, user, passwd, remote_path, local_path):
dest_path = local_path
else:
raise Exception('path:%s is not exists' % dir_name)
- LOG.info("download_dir from host:%s:%s to dest:%s" % (host, remote_path, dest_path))
+ LOG.info(
+ "download_dir from host:%s:%s to dest:%s" %
+ (host, remote_path, dest_path))
transport = paramiko.Transport((host, 22))
transport.connect(username=user, password=passwd)
sftp = paramiko.SFTPClient.from_transport(transport)
@@ -189,7 +204,8 @@ def download_dir(host, user, passwd, remote_path, local_path):
path = q.get()
st = sftp.lstat(path).st_mode
relative_path = path[len(remote_path):]
- if relative_path.startswith('/'): relative_path = relative_path[1:]
+ if relative_path.startswith('/'):
+ relative_path = relative_path[1:]
local = os.path.join(dest_path, relative_path)
if os.path.exists(local):
shutil.rmtree(local)
@@ -206,7 +222,9 @@ def download_dir(host, user, passwd, remote_path, local_path):
sftp.get(fullpath, dest)
os.chmod(dest, st)
else:
- raise Exception('path:%s:%s not exists or is not a dir' % (host, remote_path))
+ raise Exception(
+ 'path:%s:%s not exists or is not a dir' %
+ (host, remote_path))
return dest_path
@@ -218,6 +236,7 @@ def run_cmd(host, user, passwd, cmd):
class SshFileTransfer(object):
+
def __init__(self, ip, user, passwd):
self.ip, self.user, self.passwd = ip, user, passwd
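
For reference, the module-level helpers touched in this hunk keep the signatures run_cmd(host, user, passwd, cmd) and upload_dir(host, user, passwd, local_dir, remote_dir). A brief, hypothetical call site (host, credentials and paths are placeholders):

    # Hedged usage sketch; only the function signatures come from ssh.py above.
    from vstf.common import ssh

    ret, out, err = ssh.run_cmd("192.168.1.10", "root", "passwd", "uname -r")
    remote_dir = ssh.upload_dir("192.168.1.10", "root", "passwd",
                                "/tmp/vstf_scripts", "/opt")
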
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/test_func.py b/testsuites/vstf/vstf_scripts/vstf/common/test_func.py
index 2a9a4c0d..3fa23cdc 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/test_func.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/test_func.py
@@ -20,4 +20,4 @@ from vstf.common import cliutil as util
help="a params of test-xx")
def do_test_xx(args):
"""this is a help doc"""
-    print "run test01 " + args.test + args.xx
\ No newline at end of file
+ print "run test01 " + args.test + args.xx
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/unix.py b/testsuites/vstf/vstf_scripts/vstf/common/unix.py
index 97582c74..ac3c9b72 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/unix.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/unix.py
@@ -14,49 +14,51 @@ from vstf.common import message
class UdpServer(object):
+
def __init__(self):
super(UdpServer, self).__init__()
try:
os.unlink(constants.sockaddr)
except OSError:
if os.path.exists(constants.sockaddr):
- raise Exception("socket not found %s" % constants.sockaddr)
- self.conn=socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-
- def listen(self,backlog=5):
+ raise Exception("socket not found %s" % constants.sockaddr)
+ self.conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+ def listen(self, backlog=5):
self.conn.listen(backlog)
-
+
def accept(self):
return self.conn.accept()
-
+
def bind(self, addr=constants.sockaddr):
return self.conn.bind(addr)
-
+
# def send(self, data, addr):
# return message.sendto(self.conn.sendto, data, addr)
-
+
# def recv(self, size=constants.buff_size):
# return message.recv(self.conn.recvfrom)
-
+
def close(self):
self.conn.close()
class UdpClient(object):
+
def __init__(self):
super(UdpClient, self).__init__()
if not os.path.exists(constants.sockaddr):
- raise Exception("socket not found %s" % constants.sockaddr)
- self.conn=socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-
+ raise Exception("socket not found %s" % constants.sockaddr)
+ self.conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
def connect(self, addr=constants.sockaddr):
return self.conn.connect(addr)
-
+
def send(self, data):
message.send(self.conn.send, data)
-
+
def recv(self):
return message.recv(self.conn.recv)
-
+
def close(self):
-        self.conn.close()
\ No newline at end of file
+ self.conn.close()
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/utils.py b/testsuites/vstf/vstf_scripts/vstf/common/utils.py
index f2e14096..e9ee2791 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/utils.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/utils.py
@@ -82,7 +82,7 @@ def my_mkdir(filepath):
try:
LOG.info("mkdir -p %s" % filepath)
os.makedirs(filepath)
- except OSError, e:
+ except OSError as e:
if e.errno == 17:
LOG.info("! %s already exists" % filepath)
else:
@@ -107,7 +107,9 @@ def check_and_kill(process):
def list_mods():
- return check_output("lsmod | sed 1,1d | awk '{print $1}'", shell=True).split()
+ return check_output(
+ "lsmod | sed 1,1d | awk '{print $1}'",
+ shell=True).split()
def check_and_rmmod(mod):
@@ -144,6 +146,7 @@ def randomMAC():
class IPCommandHelper(object):
+
def __init__(self, ns=None):
self.devices = []
self.macs = []
@@ -174,7 +177,10 @@ class IPCommandHelper(object):
cmd = "ip netns exec %s " % ns + cmd
for device in self.devices:
buf = check_output(cmd % device, shell=True)
- bdfs = re.findall(r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$', buf, re.MULTILINE)
+ bdfs = re.findall(
+ r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$',
+ buf,
+ re.MULTILINE)
if bdfs:
self.bdf_device_map[bdfs[0]] = device
self.device_bdf_map[device] = bdfs[0]
@@ -188,7 +194,9 @@ class IPCommandHelper(object):
if ns:
cmd = "ip netns exec %s " % ns + cmd
buf = check_output(cmd, shell=True)
- macs = re.compile(r"[A-F0-9]{2}(?::[A-F0-9]{2}){5}", re.IGNORECASE | re.MULTILINE)
+ macs = re.compile(
+ r"[A-F0-9]{2}(?::[A-F0-9]{2}){5}",
+ re.IGNORECASE | re.MULTILINE)
for mac in macs.findall(buf):
if mac.lower() not in ('00:00:00:00:00:00', 'ff:ff:ff:ff:ff:ff'):
return mac
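Note (illustrative, not part of the patch): the my_mkdir hunk above switches to the Python 3 compatible "except OSError as e" form and treats errno 17 as "path already exists". The same check written with the symbolic constant, for comparison:

    # Sketch only; the patch keeps the literal 17, this uses errno.EEXIST.
    import errno
    import os

    def my_mkdir_sketch(filepath):
        try:
            os.makedirs(filepath)
        except OSError as e:
            if e.errno == errno.EEXIST:   # 17: path already exists
                pass                      # treated as success, as in the patch
            else:
                raise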
diff --git a/testsuites/vstf/vstf_scripts/vstf/common/vstfcli.py b/testsuites/vstf/vstf_scripts/vstf/common/vstfcli.py
index 896bb1d6..ae4fecfb 100644
--- a/testsuites/vstf/vstf_scripts/vstf/common/vstfcli.py
+++ b/testsuites/vstf/vstf_scripts/vstf/common/vstfcli.py
@@ -12,6 +12,7 @@ import sys
class VstfHelpFormatter(argparse.HelpFormatter):
+
def start_section(self, heading):
# Title-case the headings
heading = '%s%s' % (heading[0].upper(), heading[1:])
@@ -19,6 +20,7 @@ class VstfHelpFormatter(argparse.HelpFormatter):
class VstfParser(argparse.ArgumentParser):
+
def __init__(self,
prog='vstf',
description="",
@@ -41,11 +43,12 @@ class VstfParser(argparse.ArgumentParser):
desc = callback.__doc__ or ''
action_help = desc.strip()
arguments = getattr(callback, 'arguments', [])
- subparser = subparsers.add_parser(command,
- help=action_help,
- description=desc,
- add_help=False,
- formatter_class=VstfHelpFormatter)
+ subparser = subparsers.add_parser(
+ command,
+ help=action_help,
+ description=desc,
+ add_help=False,
+ formatter_class=VstfHelpFormatter)
subparser.add_argument('-h', '--help',
action='help',
help=argparse.SUPPRESS)
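Note (illustrative, not part of the patch): the reflowed add_parser call above builds one sub-command per do_* callback, pulling the help text from the callback's docstring and its "arguments" attribute. A hedged sketch of a callback this machinery would pick up; the util.arg decorator name follows the test_func.py hunk earlier in this patch, and the flag itself is made up.

    # Sketch only. Assumes vstf.common.cliutil provides an `arg` decorator
    # that records argparse arguments on the callback, as test_func.py suggests.
    from vstf.common import cliutil as util

    @util.arg("--case", dest="case", default="Tn-1",
              help="case tag to display (illustrative flag)")
    def do_show_case(args):
        """print the selected case tag"""
        print("case: %s" % args.case)

    # VstfParser.set_subcommand_parser(sys.modules[__name__]) would then
    # register this callback as a sub-command derived from the do_ prefix.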
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/api_server.py b/testsuites/vstf/vstf_scripts/vstf/controller/api_server.py
index a37bf4c1..02bf486c 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/api_server.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/api_server.py
@@ -45,10 +45,11 @@ cmd = CommandLine()
class OpsChains(object):
+
def __init__(self, monitor, port):
"""The ops chains will setup the proxy to rabbitmq
and setup a thread to watch the queues of rabbitmq
-
+
"""
LOG.info("VSTF Manager start to listen to %s", monitor)
if not os.path.exists(cst.VSTFCPATH):
@@ -63,7 +64,8 @@ class OpsChains(object):
if not target:
respond = "the target is empty, not support now."
else:
- respond = self.chanl.call(self.chanl.make_msg("list_nic_devices"), target)
+ respond = self.chanl.call(
+ self.chanl.make_msg("list_nic_devices"), target)
return respond
def src_install(self, host, config_file):
@@ -118,8 +120,8 @@ class OpsChains(object):
return Fabricant(host, self.chanl).affctl_list()
def _create_task(self, scenario):
- taskid = self.dbconn.create_task(str(uuid.uuid4()), time.strftime(cst.TIME_FORMAT),
- desc=scenario + "Test")
+ taskid = self.dbconn.create_task(str(uuid.uuid4()), time.strftime(
+ cst.TIME_FORMAT), desc=scenario + "Test")
LOG.info("new Task id:%s" % taskid)
if -1 == taskid:
raise Exception("DB create task failed.")
@@ -142,7 +144,8 @@ class OpsChains(object):
LOG.info(nic_info)
- os_info, cpu_info, mem_info, hw_info = self.collection.collect_host_info(host["agent"])
+ os_info, cpu_info, mem_info, hw_info = self.collection.collect_host_info(host[
+ "agent"])
LOG.info(os_info)
LOG.info(cpu_info)
LOG.info(mem_info)
@@ -165,11 +168,11 @@ class OpsChains(object):
forward_settings = ForwardingSettings()
head_d = {
"ip": head,
- "namespace":forward_settings.settings["head"]["namespace"]
+ "namespace": forward_settings.settings["head"]["namespace"]
}
tail_d = {
"ip": tail,
- "namespace":forward_settings.settings["tail"]["namespace"]
+ "namespace": forward_settings.settings["tail"]["namespace"]
}
LOG.info(head_d)
LOG.info(tail_d)
@@ -184,10 +187,19 @@ class OpsChains(object):
info_str = "do report over"
return info_str
- def run_perf_cmd(self, case, rpath='./', affctl=False, build_on=False, save_on=False, report_on=False,
- mail_on=False):
+ def run_perf_cmd(
+ self,
+ case,
+ rpath='./',
+ affctl=False,
+ build_on=False,
+ save_on=False,
+ report_on=False,
+ mail_on=False):
LOG.info(case)
- LOG.info("build_on:%s report_on:%s mail_on:%s" % (build_on, report_on, mail_on))
+ LOG.info(
+ "build_on:%s report_on:%s mail_on:%s" %
+ (build_on, report_on, mail_on))
casetag = case['case']
tool = case['tool']
protocol = case['protocol']
@@ -216,7 +228,10 @@ class OpsChains(object):
tool_settings = ToolSettings()
tester_settings = TesterSettings()
flow_producer = FlowsProducer(self.chanl, flows_settings)
- provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+ provider = PerfProvider(
+ flows_settings.settings,
+ tool_settings.settings,
+ tester_settings.settings)
perf = pf.Performance(self.chanl, provider)
flow_producer.create(scenario, casetag)
@@ -225,20 +240,29 @@ class OpsChains(object):
LOG.info(result)
if save_on:
taskid = self._create_task(scenario)
- testid = self.dbconn.add_test_2task(taskid, casetag, protocol, ttype, switch, provider, tool)
+ testid = self.dbconn.add_test_2task(
+ taskid, casetag, protocol, ttype, switch, provider, tool)
LOG.info(testid)
self.dbconn.add_data_2test(testid, result)
if report_on:
self.report(rpath, not mail_on, taskid)
return result
- def run_perf_file(self, rpath='./', affctl=False, report_on=True, mail_on=True):
+ def run_perf_file(
+ self,
+ rpath='./',
+ affctl=False,
+ report_on=True,
+ mail_on=True):
perf_settings = PerfSettings()
flows_settings = FlowsSettings()
tool_settings = ToolSettings()
tester_settings = TesterSettings()
flow_producer = FlowsProducer(self.chanl, flows_settings)
- provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+ provider = PerfProvider(
+ flows_settings.settings,
+ tool_settings.settings,
+ tester_settings.settings)
perf = pf.Performance(self.chanl, provider)
tests = perf_settings.settings
@@ -274,7 +298,8 @@ class OpsChains(object):
result = perf.run(tool, protocol, ttype, sizes, affctl)
LOG.info(result)
- testid = self.dbconn.add_test_2task(taskid, casetag, protocol, ttype, switch, provider, tool)
+ testid = self.dbconn.add_test_2task(
+ taskid, casetag, protocol, ttype, switch, provider, tool)
LOG.info(testid)
self.dbconn.add_data_2test(testid, result)
@@ -293,6 +318,7 @@ class OpsChains(object):
class Manager(daemon.Daemon):
+
def __init__(self):
"""
The manager will create a socket for vstfadm.
@@ -356,13 +382,16 @@ class Manager(daemon.Daemon):
self.daemon_die()
raise e
except Exception as e:
- # here just the function failed no need exit, just return the msg
+ # here just the function failed no need exit, just return
+ # the msg
msg = "Run function failed. [ %s ]" % (e)
response = msg
LOG.error(msg)
try:
response = message.add_context(response, **context)
- LOG.debug("Manager send the response: <%(r)s", {'r': response})
+ LOG.debug(
+ "Manager send the response: <%(r)s", {
+ 'r': response})
message.send(conn.send, message.encode(response))
except Exception as e:
self.daemon_die()
@@ -374,7 +403,8 @@ class Manager(daemon.Daemon):
"""overwrite daemon.Daemon.daemon_die(self)"""
LOG.info("manage catch the signal %s to exit." % signum)
if self.conn:
- # we can not close the conn direct, just tell manager to stop accept
+ # we can not close the conn direct, just tell manager to stop
+ # accept
self.run_flag = False
if self.ops:
@@ -418,8 +448,13 @@ def do_stop(args):
def main():
"""this is for vstfctl"""
- setup_logging(level=logging.INFO, log_file="/var/log/vstf/vstf-manager.log", clevel=logging.INFO)
- parser = VstfParser(prog="vstf-manager", description="vstf manager command line")
+ setup_logging(
+ level=logging.INFO,
+ log_file="/var/log/vstf/vstf-manager.log",
+ clevel=logging.INFO)
+ parser = VstfParser(
+ prog="vstf-manager",
+ description="vstf manager command line")
parser.set_subcommand_parser(target=sys.modules[__name__])
args = parser.parse_args()
args.func(args)
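Note (illustrative, not part of the patch): run_perf_cmd keeps the same keyword defaults after the signature reflow. A hedged sketch of a call; the dict keys "case", "tool" and "protocol" are the ones read in the hunk above, while the values are invented and the remaining case fields are not shown in this hunk.

    # Sketch only -- `ops` stands for an OpsChains instance already wired
    # to the rabbitmq proxy; the case values are invented for illustration.
    case = {
        "case": "Tn-1",       # casetag, looked up via query_scenario()
        "tool": "netperf",
        "protocol": "udp",
        # further fields (test type, frame sizes, ...) are consumed later
        # in run_perf_cmd but are not visible in this hunk
    }
    result = ops.run_perf_cmd(case, rpath="./", affctl=False,
                              build_on=False, save_on=True,
                              report_on=False, mail_on=False)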
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/database/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/database/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/database/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/database/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/database/dbinterface.py b/testsuites/vstf/vstf_scripts/vstf/controller/database/dbinterface.py
index 410e1ee5..a2aad9e8 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/database/dbinterface.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/database/dbinterface.py
@@ -33,6 +33,7 @@ def after_cursor_execute(conn, cursor, statement,
class DbManage(object):
+
def __init__(self, db_name=const.DBPATH):
db_exists = os.path.exists(db_name)
try:
@@ -165,9 +166,18 @@ class DbManage(object):
else:
return 0
- def add_test_2task(self, task, case, protocol, typ, switch, provider, tool):
+ def add_test_2task(
+ self,
+ task,
+ case,
+ protocol,
+ typ,
+ switch,
+ provider,
+ tool):
try:
- item = table.TblTestList(task, case, protocol, typ, switch, provider, tool)
+ item = table.TblTestList(
+ task, case, protocol, typ, switch, provider, tool)
self._session.add(item)
self._session.commit()
except Exception:
@@ -236,7 +246,8 @@ class DbManage(object):
ret = self._session.query(table.TblTaskList)
if ret:
for tmp in ret.all():
- result.append([tmp.TaskID, tmp.TaskName, tmp.Date, tmp.EXTInfo])
+ result.append(
+ [tmp.TaskID, tmp.TaskName, tmp.Date, tmp.EXTInfo])
return result
def query_all_task_id(self):
@@ -255,7 +266,9 @@ class DbManage(object):
return query.all()
def query_scenario(self, casetag):
- query = self._session.query(table.TblCaseInfo.ScenarioName).filter(table.TblCaseInfo.CaseTag == casetag)
+ query = self._session.query(
+ table.TblCaseInfo.ScenarioName).filter(
+ table.TblCaseInfo.CaseTag == casetag)
ret = ""
if query and query.first():
ret = query.first()[0]
@@ -282,10 +295,13 @@ class DbManage(object):
# Single TblTestList API
def query_caselist(self, taskid, scenario):
- query = self._session.query(table.TblTestList.CaseTag).filter(and_(
- table.TblTestList.CaseTag == table.TblCaseInfo.CaseTag,
- table.TblCaseInfo.ScenarioName == scenario,
- table.TblTestList.TaskID == taskid)).group_by(table.TblCaseInfo.CaseTag)
+ query = self._session.query(
+ table.TblTestList.CaseTag).filter(
+ and_(
+ table.TblTestList.CaseTag == table.TblCaseInfo.CaseTag,
+ table.TblCaseInfo.ScenarioName == scenario,
+ table.TblTestList.TaskID == taskid)).group_by(
+ table.TblCaseInfo.CaseTag)
return query.all()
def query_testlist(self, taskid, scenario):
@@ -308,65 +324,85 @@ class DbManage(object):
return query.all()
def query_casetools(self, taskid, casetag):
- query = self._session.query(table.TblTestList.Tools).filter(and_(
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.TaskID == taskid)).group_by(table.TblTestList.Tools)
+ query = self._session.query(
+ table.TblTestList.Tools).filter(
+ and_(
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.TaskID == taskid)).group_by(
+ table.TblTestList.Tools)
return query.all()
def query_scenariolist(self, taskid):
- query = self._session.query(table.TblCaseInfo.ScenarioName).filter(and_(
- table.TblTestList.CaseTag == table.TblCaseInfo.CaseTag,
- table.TblTestList.TaskID == taskid)).group_by(table.TblCaseInfo.ScenarioName)
+ query = self._session.query(
+ table.TblCaseInfo.ScenarioName).filter(
+ and_(
+ table.TblTestList.CaseTag == table.TblCaseInfo.CaseTag,
+ table.TblTestList.TaskID == taskid)).group_by(
+ table.TblCaseInfo.ScenarioName)
return query.all()
def query_throughput_load(self, taskid, casetag, provider):
ptype = 'throughput'
- query = self._session.query(table.TblThroughput.AvgFrameSize, table.TblThroughput.OfferedLoad).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblThroughput.TestID))
+ query = self._session.query(
+ table.TblThroughput.AvgFrameSize,
+ table.TblThroughput.OfferedLoad).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblThroughput.TestID))
return query.all()
def query_throughput_bandwidth(self, taskid, casetag, provider):
ptype = 'throughput'
- query = self._session.query(table.TblThroughput.AvgFrameSize, table.TblThroughput.Bandwidth).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblThroughput.TestID))
+ query = self._session.query(
+ table.TblThroughput.AvgFrameSize,
+ table.TblThroughput.Bandwidth).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblThroughput.TestID))
return query.all()
def query_throughput_table(self, taskid, casetag, provider):
ptype = 'throughput'
- query = self._session.query(table.TblThroughput.AvgFrameSize,
- table.TblThroughput.Bandwidth,
- table.TblThroughput.OfferedLoad,
- table.TblThroughput.CPU,
- table.TblThroughput.MppspGhz,
- table.TblThroughput.MinimumLatency,
- table.TblThroughput.MaximumLatency,
- table.TblThroughput.AverageLatency,
- ).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblThroughput.TestID))
+ query = self._session.query(
+ table.TblThroughput.AvgFrameSize,
+ table.TblThroughput.Bandwidth,
+ table.TblThroughput.OfferedLoad,
+ table.TblThroughput.CPU,
+ table.TblThroughput.MppspGhz,
+ table.TblThroughput.MinimumLatency,
+ table.TblThroughput.MaximumLatency,
+ table.TblThroughput.AverageLatency,
+ ).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblThroughput.TestID))
return query.all()
def query_throughput_simpletable(self, taskid, casetag, provider):
ptype = 'throughput'
- query = self._session.query(table.TblThroughput.AvgFrameSize,
- table.TblThroughput.Bandwidth,
- table.TblThroughput.OfferedLoad,
- table.TblThroughput.CPU,
- table.TblThroughput.MppspGhz,
- table.TblThroughput.AverageLatency,
- ).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblThroughput.TestID))
+ query = self._session.query(
+ table.TblThroughput.AvgFrameSize,
+ table.TblThroughput.Bandwidth,
+ table.TblThroughput.OfferedLoad,
+ table.TblThroughput.CPU,
+ table.TblThroughput.MppspGhz,
+ table.TblThroughput.AverageLatency,
+ ).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblThroughput.TestID))
return query.all()
def query_testdata(self, testid, ptype):
@@ -376,79 +412,103 @@ class DbManage(object):
def query_throughput_avg(self, taskid, casetag, provider):
ptype = 'throughput'
- query = self._session.query(table.TblThroughput.AvgFrameSize, table.TblThroughput.AverageLatency).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblThroughput.TestID))
+ query = self._session.query(
+ table.TblThroughput.AvgFrameSize,
+ table.TblThroughput.AverageLatency).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblThroughput.TestID))
return query.all()
def query_frameloss_bandwidth(self, taskid, casetag, provider):
ptype = 'frameloss'
- query = self._session.query(table.TblFrameloss.AvgFrameSize, table.TblFrameloss.Bandwidth).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblFrameloss.TestID))
+ query = self._session.query(
+ table.TblFrameloss.AvgFrameSize,
+ table.TblFrameloss.Bandwidth).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblFrameloss.TestID))
return query.all()
def query_frameloss_load(self, taskid, casetag, provider):
ptype = 'frameloss'
- query = self._session.query(table.TblFrameloss.AvgFrameSize, table.TblFrameloss.OfferedLoad).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblFrameloss.TestID))
+ query = self._session.query(
+ table.TblFrameloss.AvgFrameSize,
+ table.TblFrameloss.OfferedLoad).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblFrameloss.TestID))
return query.all()
def query_frameloss_table(self, taskid, casetag, provider):
ptype = 'frameloss'
- query = self._session.query(table.TblFrameloss.AvgFrameSize,
- table.TblFrameloss.Bandwidth,
- table.TblFrameloss.OfferedLoad,
- table.TblFrameloss.CPU,
- table.TblFrameloss.MppspGhz,
- table.TblFrameloss.MinimumLatency,
- table.TblFrameloss.MaximumLatency,
- table.TblFrameloss.AverageLatency
- ).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblFrameloss.TestID))
+ query = self._session.query(
+ table.TblFrameloss.AvgFrameSize,
+ table.TblFrameloss.Bandwidth,
+ table.TblFrameloss.OfferedLoad,
+ table.TblFrameloss.CPU,
+ table.TblFrameloss.MppspGhz,
+ table.TblFrameloss.MinimumLatency,
+ table.TblFrameloss.MaximumLatency,
+ table.TblFrameloss.AverageLatency).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblFrameloss.TestID))
return query.all()
def query_frameloss_simpletable(self, taskid, casetag, provider):
ptype = 'frameloss'
- query = self._session.query(table.TblFrameloss.AvgFrameSize,
- table.TblFrameloss.Bandwidth,
- table.TblFrameloss.OfferedLoad,
- table.TblFrameloss.CPU,
- table.TblFrameloss.MppspGhz,
- table.TblFrameloss.AverageLatency
- ).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblFrameloss.TestID))
+ query = self._session.query(
+ table.TblFrameloss.AvgFrameSize,
+ table.TblFrameloss.Bandwidth,
+ table.TblFrameloss.OfferedLoad,
+ table.TblFrameloss.CPU,
+ table.TblFrameloss.MppspGhz,
+ table.TblFrameloss.AverageLatency).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblFrameloss.TestID))
return query.all()
def query_frameloss_avg(self, taskid, casetag, provider):
ptype = 'frameloss'
- query = self._session.query(table.TblFrameloss.AvgFrameSize, table.TblFrameloss.AverageLatency).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblFrameloss.TestID))
+ query = self._session.query(
+ table.TblFrameloss.AvgFrameSize,
+ table.TblFrameloss.AverageLatency).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblFrameloss.TestID))
return query.all()
def query_latency_avg(self, taskid, casetag, provider):
ptype = 'latency'
- query = self._session.query(table.TblLatency.AvgFrameSize, table.TblLatency.AverageLatency).filter(and_(
- table.TblTestList.TaskID == taskid,
- table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
- table.TblTestList.TestID == table.TblLatency.TestID))
+ query = self._session.query(
+ table.TblLatency.AvgFrameSize,
+ table.TblLatency.AverageLatency).filter(
+ and_(
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TestID == table.TblLatency.TestID))
return query.all()
def query_summary_table(self, taskid, casetag, provider, ptype):
@@ -482,51 +542,71 @@ class DbManage(object):
return []
def query_throughput_provider(self, taskid, casetag, provider):
- query = self._session.query(table.TblThroughput).filter(and_(table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider,
- table.TblTestList.TaskID == taskid,
- table.TblTestList.TestID == table.TblThroughput.TestID))
+ query = self._session.query(
+ table.TblThroughput).filter(
+ and_(
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.TestID == table.TblThroughput.TestID))
return query.all()
def query_frameloss_provider(self, taskid, casetag, provider):
- query = self._session.query(table.TblFrameloss).filter(and_(table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider,
- table.TblTestList.TaskID == taskid,
- table.TblTestList.TestID == table.TblFrameloss.TestID))
+ query = self._session.query(
+ table.TblFrameloss).filter(
+ and_(
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.TestID == table.TblFrameloss.TestID))
return query.all()
def query_latency_provider(self, taskid, casetag, provider):
- query = self._session.query(table.TblLatency).filter(and_(table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider,
- table.TblTestList.TaskID == taskid,
- table.TblTestList.TestID == table.TblLatency.TestID))
+ query = self._session.query(
+ table.TblLatency).filter(
+ and_(
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.TaskID == taskid,
+ table.TblTestList.TestID == table.TblLatency.TestID))
return query.all()
def query_case_type_count(self, taskid, casetag, ptype):
- query = self._session.query(table.TblTestList).filter(and_(table.TblTestList.CaseTag == casetag,
- table.TblTestList.Type == ptype,
- table.TblTestList.TaskID == taskid))
+ query = self._session.query(
+ table.TblTestList).filter(
+ and_(
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.TaskID == taskid))
return query.count()
def query_case_provider_count(self, taskid, casetag, provider):
- query = self._session.query(table.TblTestList).filter(and_(table.TblTestList.CaseTag == casetag,
- table.TblTestList.Provider == provider,
- table.TblTestList.TaskID == taskid))
+ query = self._session.query(
+ table.TblTestList).filter(
+ and_(
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.TaskID == taskid))
return query.count()
def query_case_type_provider_count(self, taskid, casetag, provider, ptype):
- query = self._session.query(table.TblTestList).filter(and_(table.TblTestList.CaseTag == casetag,
- table.TblTestList.Type == ptype,
- table.TblTestList.Provider == provider,
- table.TblTestList.TaskID == taskid))
+ query = self._session.query(
+ table.TblTestList).filter(
+ and_(
+ table.TblTestList.CaseTag == casetag,
+ table.TblTestList.Type == ptype,
+ table.TblTestList.Provider == provider,
+ table.TblTestList.TaskID == taskid))
return query.count()
def query_exten_info(self, taskid):
- query = self._session.query(table.TblEXTInfo.EXTName,
- table.TblEXTInfo.EXTContent,
- table.TblEXTInfo.Description).filter(table.TblEXTInfo.TaskID == taskid)
+ query = self._session.query(
+ table.TblEXTInfo.EXTName,
+ table.TblEXTInfo.EXTContent,
+ table.TblEXTInfo.Description).filter(
+ table.TblEXTInfo.TaskID == taskid)
return query.all()
@@ -534,12 +614,27 @@ def unit_test():
import time
dbase = DbManage()
- taskid = dbase.create_task("test", str(time.ctime()), "this is a unit test")
- dbase.add_host_2task(taskid, "hosta", "hw82576", "xxx", "x", "82599", "ubuntu")
+ taskid = dbase.create_task("test", str(
+ time.ctime()), "this is a unit test")
+ dbase.add_host_2task(
+ taskid,
+ "hosta",
+ "hw82576",
+ "xxx",
+ "x",
+ "82599",
+ "ubuntu")
dbase.add_extent_2task(taskid, "CETH", "driver", "version 2.0")
dbase.add_extent_2task(taskid, "EVS", "switch", "version 3.0")
- testid = dbase.add_test_2task(taskid, "Tn-1", 'udp', "throughput", "ovs", None, "netperf")
+ testid = dbase.add_test_2task(
+ taskid,
+ "Tn-1",
+ 'udp',
+ "throughput",
+ "ovs",
+ None,
+ "netperf")
data = {
'64': {
'OfferedLoad': 2,
@@ -557,7 +652,14 @@ def unit_test():
}
dbase.add_data_2test(testid, data)
- testid = dbase.add_test_2task(taskid, "Tn-1", 'udp', "frameloss", "ovs", None, "netperf")
+ testid = dbase.add_test_2task(
+ taskid,
+ "Tn-1",
+ 'udp',
+ "frameloss",
+ "ovs",
+ None,
+ "netperf")
data = {
'64': {
'OfferedLoad': 2,
@@ -575,13 +677,35 @@ def unit_test():
}
dbase.add_data_2test(testid, data)
- testid = dbase.add_test_2task(taskid, "Tn-1", 'udp', "latency", "ovs", None, "netperf")
+ testid = dbase.add_test_2task(
+ taskid,
+ "Tn-1",
+ 'udp',
+ "latency",
+ "ovs",
+ None,
+ "netperf")
data = {
- 64: {'MaximumLatency': 0.0, 'AverageLatency': 0.0, 'MinimumLatency': 0.0, 'OfferedLoad': 0.0},
- 128: {'MaximumLatency': 0.0, 'AverageLatency': 0.0, 'MinimumLatency': 0.0, 'OfferedLoad': 0.0},
- 512: {'MaximumLatency': 0.0, 'AverageLatency': 0.0, 'MinimumLatency': 0.0, 'OfferedLoad': 0.0},
- 1024: {'MaximumLatency': 0.0, 'AverageLatency': 0.0, 'MinimumLatency': 0.0, 'OfferedLoad': 0.0}
- }
+ 64: {
+ 'MaximumLatency': 0.0,
+ 'AverageLatency': 0.0,
+ 'MinimumLatency': 0.0,
+ 'OfferedLoad': 0.0},
+ 128: {
+ 'MaximumLatency': 0.0,
+ 'AverageLatency': 0.0,
+ 'MinimumLatency': 0.0,
+ 'OfferedLoad': 0.0},
+ 512: {
+ 'MaximumLatency': 0.0,
+ 'AverageLatency': 0.0,
+ 'MinimumLatency': 0.0,
+ 'OfferedLoad': 0.0},
+ 1024: {
+ 'MaximumLatency': 0.0,
+ 'AverageLatency': 0.0,
+ 'MinimumLatency': 0.0,
+ 'OfferedLoad': 0.0}}
dbase.add_data_2test(testid, data)
query = dbase.query_testlist(1, "Tn")
for item in query:
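Note (illustrative, not part of the patch): unit_test above exercises the write path (create_task, add_test_2task, add_data_2test). A matching read-side sketch using the reflowed query helpers; "Tn-1" and "fastlink" reuse values that appear elsewhere in this patch, and the loop body is made up.

    # Sketch only.
    dbase = DbManage()                    # defaults to const.DBPATH
    taskid = dbase.get_last_taskid()      # helper used elsewhere in this patch
    for size, load in dbase.query_throughput_load(taskid, "Tn-1", "fastlink"):
        print("frame size %s byte -> offered load %s" % (size, load))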
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/database/tables.py b/testsuites/vstf/vstf_scripts/vstf/controller/database/tables.py
index 55b02e5f..92f857a0 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/database/tables.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/database/tables.py
@@ -52,7 +52,7 @@ class TblCaseInfo(Base):
ScenarioName, FigurePath, Direction, Directiontag,
Configure, Description, **kwargs):
"""
- :param CaseID:
+ :param CaseID:
:param CaseTag: ??
:param CaseName: name of case, like tester-vm
:param ScenarioName: name of scenario, like Tn
@@ -135,7 +135,16 @@ class TblTestList(Base):
Provider = Column(String(const.PROVIDER_LEN))
Tools = Column(String(const.TOOLS_LEN))
- def __init__(self, taskid, casetag, protocol, typ, switch, provider, tools, **kwargs):
+ def __init__(
+ self,
+ taskid,
+ casetag,
+ protocol,
+ typ,
+ switch,
+ provider,
+ tools,
+ **kwargs):
"""Table of test"""
self.TaskID = taskid
self.CaseTag = casetag
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/env_build/cfg_intent_parse.py b/testsuites/vstf/vstf_scripts/vstf/controller/env_build/cfg_intent_parse.py
index b536e3b8..acc88d91 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/env_build/cfg_intent_parse.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/env_build/cfg_intent_parse.py
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
class IntentParser(object):
+
def __init__(self, cfg_file):
self.cfg_file = cfg_file
with file(cfg_file) as fp:
@@ -59,7 +60,9 @@ class IntentParser(object):
for tap_cfg in vm_cfg['taps']:
br_type_set.add(tap_cfg["br_type"])
if len(br_type_set) > 1:
- raise Exception("specified more than one type of vswitchfor host:%s" % host_cfg['ip'])
+ raise Exception(
+ "specified more than one type of vswitchfor host:%s" %
+ host_cfg['ip'])
if len(br_type_set) > 0:
br_type = br_type_set.pop()
host_cfg['br_type'] = br_type
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_build.py b/testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_build.py
index 1d201b77..40e25e9f 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_build.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_build.py
@@ -17,6 +17,7 @@ LOG = logging.getLogger(__name__)
class EnvBuildApi(object):
+
def __init__(self, conn, config_file):
LOG.info("welcome to EnvBuilder")
self.conn = conn
@@ -48,6 +49,7 @@ class EnvBuildApi(object):
class TransmitterBuild(object):
+
def __init__(self, conn, config_file):
LOG.info("welcome to TransmitterBuild")
self.conn = conn
@@ -72,7 +74,9 @@ if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
- parser.add_argument('--rpc_server', help='rabbitmq server for deliver messages.')
+ parser.add_argument(
+ '--rpc_server',
+ help='rabbitmq server for deliver messages.')
parser.add_argument('--config', help='config file to parse')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_collect.py b/testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_collect.py
index 6e32a05d..7861ad31 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_collect.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_collect.py
@@ -11,6 +11,7 @@ from vstf.rpc_frame_work import rpc_producer
class EnvCollectApi(object):
+
def __init__(self, rb_mq_server):
"""
When use collect, a connection of rabbitmq is needed.
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/fabricant.py b/testsuites/vstf/vstf_scripts/vstf/controller/fabricant.py
index 3b1c082f..3f6978e4 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/fabricant.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/fabricant.py
@@ -12,6 +12,7 @@ import vstf.common.constants as cst
class Fabricant(object):
+
def __init__(self, target, conn):
self.conn = conn
self.target = target
@@ -21,7 +22,12 @@ class Fabricant(object):
@property
def declare_commands(self):
- driver = {"install_drivers", "clean_drivers", "autoneg_on", "autoneg_off", "autoneg_query"}
+ driver = {
+ "install_drivers",
+ "clean_drivers",
+ "autoneg_on",
+ "autoneg_off",
+ "autoneg_query"}
builder = {"build_env", "clean_env"}
@@ -29,7 +35,10 @@ class Fabricant(object):
perf = {"perf_run", "run_vnstat", "kill_vnstat", "force_clean"}
- device_mgr = {"get_device_detail", "list_nic_devices", "get_device_verbose"}
+ device_mgr = {
+ "get_device_detail",
+ "list_nic_devices",
+ "get_device_verbose"}
netns = {"clean_all_namespace", "config_dev", "recover_dev", "ping"}
@@ -37,11 +46,22 @@ class Fabricant(object):
cmdline = {"execute"}
- spirent = {"send_packet", "stop_flow", "mac_learning", "run_rfc2544suite", "run_rfc2544_throughput",
- "run_rfc2544_frameloss", "run_rfc2544_latency"}
-
- equalizer = {"get_numa_core", "get_nic_numa", "get_nic_interrupt_proc", "get_vm_info", "bind_cpu",
- "catch_thread_info"}
+ spirent = {
+ "send_packet",
+ "stop_flow",
+ "mac_learning",
+ "run_rfc2544suite",
+ "run_rfc2544_throughput",
+ "run_rfc2544_frameloss",
+ "run_rfc2544_latency"}
+
+ equalizer = {
+ "get_numa_core",
+ "get_nic_numa",
+ "get_nic_interrupt_proc",
+ "get_vm_info",
+ "bind_cpu",
+ "catch_thread_info"}
return driver | cpu | builder | perf | device_mgr | netns | cmdline | collect | spirent | equalizer
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/mail.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/mail.py
index 6792ad91..c217f9e5 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/mail.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/mail.py
@@ -21,6 +21,7 @@ PASSWD = None
class Mail(object):
+
def __init__(self, srv=SRV, user=USER, passwd=PASSWD):
self.srv = srv
self.user = USER
@@ -81,7 +82,10 @@ class Mail(object):
def attach_files(self, files):
for _file in files:
part = MIMEApplication(open(_file, "rb").read())
- part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(_file))
+ part.add_header(
+ 'Content-Disposition',
+ 'attachment',
+ filename=os.path.basename(_file))
self._msg.attach(part)
def send(self):
@@ -114,11 +118,11 @@ if __name__ == "__main__":
<head>
<title>vstf</title>
</head>
-
+
<body>
hello vstf
</body>
-
+
</html>
"""
m.attach_text(context, m.HTML)
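Note (illustrative, not part of the patch): the attach_files hunk sets a per-attachment Content-Disposition header. A hedged sketch of building a mail with an attachment; recipient and server setup are not shown in this hunk and are omitted, and the file path is made up.

    # Sketch only; attach_title/attach_text/attach_files/send are the
    # methods used by sendmail.py in this same patch.
    m = Mail()
    m.attach_title("vstf report")
    m.attach_text("hello vstf", m.HTML)          # HTML subtype, as in __main__
    m.attach_files(["/tmp/vstf_report.pdf"])     # illustrative path
    m.send()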
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/sendmail.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/sendmail.py
index a4d7bb0a..42f991a8 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/sendmail.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/sendmail.py
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
class SendMail(object):
+
def __init__(self, mail_info):
self._mail_info = mail_info
@@ -32,7 +33,9 @@ class SendMail(object):
if 'attach' in self._mail_info['body']:
send.attach_files(self._mail_info['body']['attach'])
- send.attach_text(self._mail_info['body']['content'], self._mail_info['body']['subtype'])
+ send.attach_text(
+ self._mail_info['body']['content'],
+ self._mail_info['body']['subtype'])
send.attach_title(self._mail_info['body']['subject'])
send.send()
@@ -50,11 +53,11 @@ def unit_test():
<head>
<title>vstf</title>
</head>
-
+
<body>
hello vstf
</body>
-
+
</html>
"""
mail_settings.set_subtype('html')
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/__init__.py
index 547db686..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/__init__.py
@@ -6,5 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/candy_generator.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/candy_generator.py
index ea296550..a3285c9e 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/candy_generator.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/candy_generator.py
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
class CandyGenerator(object):
+
def __init__(self, task):
self._task = task
@@ -99,7 +100,8 @@ class CandyGenerator(object):
"data": scenario_data.get_latency_bardata(case)
}
table = scenario_data.get_latency_tabledata(case)
- test_section = self.create_test(sectionid, params_info, table, draw)
+ test_section = self.create_test(
+ sectionid, params_info, table, draw)
scenario_chapter[name] = test_section
return scenario_chapter
@@ -125,7 +127,10 @@ class CandyGenerator(object):
def main():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-candy.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-candy.log",
+ clevel=logging.INFO)
dbase = DbManage()
taskid = dbase.get_last_taskid()
@@ -135,4 +140,3 @@ def main():
creator.create("Tn")
if __name__ == '__main__':
main()
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/data_factory.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/data_factory.py
index f9fc69d9..ded94ebd 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/data_factory.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/data_factory.py
@@ -12,12 +12,14 @@ import vstf.common.constants as cst
class DataProvider(object):
+
def __init__(self, taskid, dbase):
self._dbase = dbase
self._taskid = taskid
class CommonData(DataProvider):
+
def get_taskname(self):
return self._dbase.query_taskname(self._taskid)
@@ -67,6 +69,7 @@ class CommonData(DataProvider):
class ScenarioData(DataProvider):
+
def __init__(self, taskid, dbase, scenario):
print "ScenarioData in"
DataProvider.__init__(self, taskid, dbase)
@@ -96,13 +99,15 @@ class ScenarioData(DataProvider):
return query
def is_provider_start(self, case, provider):
- count = self._dbase.query_case_provider_count(self._taskid, case, provider)
+ count = self._dbase.query_case_provider_count(
+ self._taskid, case, provider)
if count:
return True
return False
def is_type_provider_start(self, case, provider, ptype):
- count = self._dbase.query_case_type_provider_count(self._taskid, case, provider, ptype)
+ count = self._dbase.query_case_type_provider_count(
+ self._taskid, case, provider, ptype)
if count:
return True
return False
@@ -133,7 +138,12 @@ class ScenarioData(DataProvider):
test_type = "frameloss"
return self.get_summary_tabledata(case, provider, test_type)
- def get_summary_tabledata(self, case, provider, test_type, table_type='pdf'):
+ def get_summary_tabledata(
+ self,
+ case,
+ provider,
+ test_type,
+ table_type='pdf'):
table_head = []
table_body = []
type_title = {
@@ -142,41 +152,77 @@ class ScenarioData(DataProvider):
}
tools = self.get_test_tools(case)
if "spirent" in tools:
- table_body = self._dbase.query_summary_table(self._taskid, case, provider, test_type)
+ table_body = self._dbase.query_summary_table(
+ self._taskid, case, provider, test_type)
if 'pdf' == table_type:
- table_head = [
- ["FrameSize (byte)", test_type, "", "", "", "Latency(uSec)", "", ""],
- ["", " Mpps ", " " + type_title[test_type] + " (%) ", "CPU Used (%)", " Mpps/Ghz ",
- " Min ", " Max ", " Avg "]
- ]
+ table_head = [["FrameSize (byte)",
+ test_type,
+ "",
+ "",
+ "",
+ "Latency(uSec)",
+ "",
+ ""],
+ ["",
+ " Mpps ",
+ " " + type_title[test_type] + " (%) ",
+ "CPU Used (%)",
+ " Mpps/Ghz ",
+ " Min ",
+ " Max ",
+ " Avg "]]
else:
- table_head = [
- ["FrameSize (byte)", " Mpps ", " " + type_title[test_type] + " (%) ", "CPU Used (%)",
- " Mpps/Ghz ", "MinLatency(uSec)", "MaxLatency(uSec)", "AvgLatency(uSec)"],
- ]
+ table_head = [["FrameSize (byte)",
+ " Mpps ",
+ " " + type_title[test_type] + " (%) ",
+ "CPU Used (%)",
+ " Mpps/Ghz ",
+ "MinLatency(uSec)",
+ "MaxLatency(uSec)",
+ "AvgLatency(uSec)"],
+ ]
else:
- table_body = self._dbase.query_summary_simpletable(self._taskid, case, provider, test_type)
+ table_body = self._dbase.query_summary_simpletable(
+ self._taskid, case, provider, test_type)
if 'pdf' == table_type:
- table_head = [
- ["FrameSize (byte)", test_type, "", "", "", "Latency(uSec)"],
- ["", " Mpps ", " " + type_title[test_type] + " (%)", "CPU Used (%)", " Mpps/Ghz ",
- " Avg "]
- ]
+ table_head = [["FrameSize (byte)",
+ test_type,
+ "",
+ "",
+ "",
+ "Latency(uSec)"],
+ ["",
+ " Mpps ",
+ " " + type_title[test_type] + " (%)",
+ "CPU Used (%)",
+ " Mpps/Ghz ",
+ " Avg "]]
else:
- table_head = [
- ["FrameSize (byte)", " Mpps ", " " + type_title[test_type] + " (%) ", "CPU Used (%)",
- " Mpps/Ghz ", "AvgLatency(uSec)"],
- ]
+ table_head = [["FrameSize (byte)",
+ " Mpps ",
+ " " + type_title[test_type] + " (%) ",
+ "CPU Used (%)",
+ " Mpps/Ghz ",
+ "AvgLatency(uSec)"],
+ ]
return table_head + table_body
def get_ratedata(self, testid, test_type):
- table_head = [
- ["FrameSize (bytes)", "Bandwidth(Mpps)", "Load (%)", "CPU Usage(%)", "Mpps/Ghz", "AvgLatency(uSec)"],
- ]
+ table_head = [["FrameSize (bytes)",
+ "Bandwidth(Mpps)",
+ "Load (%)",
+ "CPU Usage(%)",
+ "Mpps/Ghz",
+ "AvgLatency(uSec)"],
+ ]
query = self._dbase.query_testdata(testid, test_type)
table_body = []
for item in query:
- table_body.append([item.AvgFrameSize, item.Bandwidth, item.OfferedLoad, item.CPU, item.MppspGhz,
+ table_body.append([item.AvgFrameSize,
+ item.Bandwidth,
+ item.OfferedLoad,
+ item.CPU,
+ item.MppspGhz,
item.AverageLatency])
result = []
if table_body:
@@ -203,20 +249,29 @@ class ScenarioData(DataProvider):
for provider in cst.PROVIDERS:
if self.is_provider_start(case, provider):
if item == 'Percent':
- query = self._dbase.query_load(self._taskid, case, provider, test_type)
+ query = self._dbase.query_load(
+ self._taskid, case, provider, test_type)
elif item == 'Mpps':
- query = self._dbase.query_bandwidth(self._taskid, case, provider, test_type)
+ query = self._dbase.query_bandwidth(
+ self._taskid, case, provider, test_type)
else:
- query = self._dbase.query_avglatency(self._taskid, case, provider, test_type)
+ query = self._dbase.query_avglatency(
+ self._taskid, case, provider, test_type)
query = map(lambda x: list(x), zip(*query))
if query:
- table_head = [[type_dict["FrameSize"]] + map(lambda x: " %4d " % (x), query[0])]
+ table_head = [[type_dict["FrameSize"]] +
+ map(lambda x: " %4d " % (x), query[0])]
if item == "Avg":
- data = map(lambda x: item_dict[item] + "%.1f" % x + item_dict[item], query[1])
+ data = map(
+ lambda x: item_dict[item] + "%.1f" %
+ x + item_dict[item], query[1])
else:
- data = map(lambda x: item_dict[item] + "%.2f" % x + item_dict[item], query[1])
+ data = map(
+ lambda x: item_dict[item] + "%.2f" %
+ x + item_dict[item], query[1])
if item == "Mpps":
- line_table = map(lambda x: "%.2f" % (line_speed * 1000 / (8 * (x + 20))), query[0])
+ line_table = map(lambda x: "%.2f" % (
+ line_speed * 1000 / (8 * (x + 20))), query[0])
table.append([type_dict[provider]] + data)
if table:
if item == "Mpps":
@@ -260,7 +315,8 @@ class ScenarioData(DataProvider):
result = []
if table_data:
ytitle = "Average Latency (uSec)"
- category_names = map(lambda x: "FS:%4d" % int(float(x)) + "LOAD:50", table_data[0][1:])
+ category_names = map(lambda x: "FS:%4d" %
+ int(float(x)) + "LOAD:50", table_data[0][1:])
bar_ = map(lambda x: x[0], table_data[1:])
data = map(lambda x: x[1:], table_data[1:])
result = [ytitle, category_names, bar_, data]
@@ -268,10 +324,12 @@ class ScenarioData(DataProvider):
def get_bardata(self, case, provider, test_type):
if test_type == "latency":
- query = self._dbase.query_avglatency(self._taskid, case, provider, test_type)
+ query = self._dbase.query_avglatency(
+ self._taskid, case, provider, test_type)
item = "Avg"
else:
- query = self._dbase.query_load(self._taskid, case, provider, test_type)
+ query = self._dbase.query_load(
+ self._taskid, case, provider, test_type)
item = "Percent"
title_dict = {
@@ -290,7 +348,9 @@ class ScenarioData(DataProvider):
query = map(lambda x: list(x), zip(*query))
result = []
if query:
- category_names = map(lambda x: "FS:%4d" % x + name_dict[item], query[0])
+ category_names = map(
+ lambda x: "FS:%4d" %
+ x + name_dict[item], query[0])
data = query[1:]
bar_ = [color_dict[item]]
result = [ytitle, category_names, bar_, data]
@@ -298,6 +358,7 @@ class ScenarioData(DataProvider):
class TaskData(object):
+
def __init__(self, taskid, dbase):
self.__common = CommonData(taskid, dbase)
scenario_list = self.__common.get_scenariolist()
@@ -312,6 +373,7 @@ class TaskData(object):
class HistoryData(DataProvider):
+
def get_data(self, task_list, case, provider, ttype, item):
"""
@provider in ["fastlink", "rdp", "l2switch", ""]
@@ -324,17 +386,18 @@ class HistoryData(DataProvider):
sizes = []
for taskid in task_list:
if item == 'ratep':
- query = self._dbase.query_bandwidth(taskid, case, provider, ttype)
+ query = self._dbase.query_bandwidth(
+ taskid, case, provider, ttype)
else:
- query = self._dbase.query_avglatency(taskid, case, provider, ttype)
+ query = self._dbase.query_avglatency(
+ taskid, case, provider, ttype)
if query:
data = {}
for size, value in query:
data[size] = value
sizes.extend(data.keys())
- sizes = {}.fromkeys(sizes).keys()
- sizes.sort()
+ sizes = sorted({}.fromkeys(sizes).keys())
datas.append({taskid: data})
result = []
@@ -367,7 +430,10 @@ class HistoryData(DataProvider):
return task_list
def get_history_info(self, case):
- provider_dict = {"fastlink": "Fast Link ", "l2switch": "L2Switch ", "rdp": "Kernel RDP "}
+ provider_dict = {
+ "fastlink": "Fast Link ",
+ "l2switch": "L2Switch ",
+ "rdp": "Kernel RDP "}
ttype_dict = {
"throughput": "Throughput Testing ",
"frameloss": "Frame Loss Testing ",
@@ -390,7 +456,8 @@ class HistoryData(DataProvider):
item = "ratep"
for provider in cst.PROVIDERS:
- table_data = self.get_data(task_list, case, provider, ttype, item)
+ table_data = self.get_data(
+ task_list, case, provider, ttype, item)
if table_data:
data = {
"title": provider_dict[provider] + items_dict[item],
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/html_base.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/html_base.py
index 5769da79..02606b4b 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/html_base.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/html_base.py
@@ -13,6 +13,7 @@ import vstf.common.pyhtml as pyhtm
class HtmlBase(object):
+
def __init__(self, provider):
self._page = pyhtm.PyHtml('Html Text')
self._provider = provider
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/htmlcreator.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/htmlcreator.py
index 695ea37f..f866f185 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/htmlcreator.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/htmlcreator.py
@@ -20,6 +20,7 @@ LOG = logging.getLogger(__name__)
class HtmlCreator(HtmlBase):
+
def create_story(self):
self.add_context()
@@ -70,7 +71,10 @@ class HtmlCreator(HtmlBase):
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/html-creator.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/html-creator.log",
+ clevel=logging.INFO)
out_file = "vstf_report.html"
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/element.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/element.py
index ef8b54df..6622281b 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/element.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/element.py
@@ -32,8 +32,16 @@ from vstf.controller.reporters.report.pdf.styles import *
class eImage(Image):
""" an image(digital picture)which contains the function of auto zoom picture """
- def __init__(self, filename, width=None, height=None, kind='direct', mask="auto", lazy=1, hAlign='CENTRE',
- vAlign='BOTTOM'):
+ def __init__(
+ self,
+ filename,
+ width=None,
+ height=None,
+ kind='direct',
+ mask="auto",
+ lazy=1,
+ hAlign='CENTRE',
+ vAlign='BOTTOM'):
Image.__init__(self, filename, None, None, kind, mask, lazy)
print height, width
print self.drawHeight, self.drawWidth
@@ -78,6 +86,7 @@ class eTable(object):
class eCommonTable(eTable):
+
def analysisData(self, data):
self._style = [
('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -89,6 +98,7 @@ class eCommonTable(eTable):
class eConfigTable(eTable):
+
def analysisData(self, data):
self._style = [
('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -113,6 +123,7 @@ class eConfigTable(eTable):
class eSummaryTable(eTable):
+
def analysisData(self, data):
self._style = [
('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -127,6 +138,7 @@ class eSummaryTable(eTable):
class eGitInfoTable(eTable):
+
def analysisData(self, data):
self._style = [
('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -141,6 +153,7 @@ class eGitInfoTable(eTable):
class eScenarioTable(eTable):
+
def analysisData(self, data):
self._style = [
('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -157,6 +170,7 @@ class eScenarioTable(eTable):
class eOptionsTable(eTable):
+
def analysisData(self, data):
self._style = [
('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -181,6 +195,7 @@ class eOptionsTable(eTable):
class eProfileTable(eTable):
+
def analysisData(self, data):
self._style = [
('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -194,6 +209,7 @@ class eProfileTable(eTable):
class eDataTable(eTable):
+
def analysisData(self, data):
result = data
self._style = [
@@ -229,6 +245,7 @@ class eDataTable(eTable):
class eGraphicsTable(eTable):
+
def analysisData(self, data):
self._style = [
('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -238,12 +255,14 @@ class eGraphicsTable(eTable):
class noScaleXValueAxis(XValueAxis):
+
def __init__(self):
XValueAxis.__init__(self)
def makeTickLabels(self):
g = Group()
- if not self.visibleLabels: return g
+ if not self.visibleLabels:
+ return g
f = self._labelTextFormat # perhaps someone already set it
if f is None:
@@ -307,14 +326,17 @@ class noScaleXValueAxis(XValueAxis):
txt = f(t)
else:
raise ValueError('Invalid labelTextFormat %s' % f)
- if post: txt = post % txt
+ if post:
+ txt = post % txt
pos[d] = v
label.setOrigin(*pos)
label.setText(txt)
- # special property to ensure a label doesn't project beyond the bounds of an x-axis
+ # special property to ensure a label doesn't project beyond
+ # the bounds of an x-axis
if self.keepTickLabelsInside:
- if isinstance(self, XValueAxis): # not done yet for y axes
+ if isinstance(
+ self, XValueAxis): # not done yet for y axes
a_x = self._x
if not i: # first one
x0, y0, x1, y1 = label.getBounds()
@@ -324,7 +346,8 @@ class noScaleXValueAxis(XValueAxis):
a_x1 = a_x + self._length
x0, y0, x1, y1 = label.getBounds()
if x1 > a_x1:
- label = label.clone(dx=label.dx - x1 + a_x1)
+ label = label.clone(
+ dx=label.dx - x1 + a_x1)
g.add(label)
return g
@@ -342,8 +365,10 @@ class noScaleXValueAxis(XValueAxis):
The chart first configures the axis, then asks it to
"""
assert self._configured, "Axis cannot scale numbers before it is configured"
- if value is None: value = 0
- # this could be made more efficient by moving the definition of org and sf into the configuration
+ if value is None:
+ value = 0
+ # this could be made more efficient by moving the definition of org and
+ # sf into the configuration
org = (self._x, self._y)[self._dataIndex]
sf = self._length / (len(self._tickValues) + 1)
if self.reverseDirection:
@@ -353,6 +378,7 @@ class noScaleXValueAxis(XValueAxis):
class noScaleLinePlot(LinePlot):
+
def __init__(self):
LinePlot.__init__(self)
self.xValueAxis = noScaleXValueAxis()
@@ -373,7 +399,8 @@ class noScaleLinePlot(LinePlot):
for colNo in range(len_row):
datum = self.data[rowNo][colNo] # x, y value
x = self.x + self.width / (len_row + 1) * (colNo + 1)
- self.xValueAxis.labels[colNo].x = self.x + self.width / (len_row + 1) * (colNo + 1)
+ self.xValueAxis.labels[colNo].x = self.x + \
+ self.width / (len_row + 1) * (colNo + 1)
y = self.yValueAxis.scale(datum[1])
# print self.width, " ", x
line.append((x, y))
@@ -383,6 +410,7 @@ class noScaleLinePlot(LinePlot):
# def _innerDrawLabel(self, rowNo, colNo, x, y):
# return None
class eLinePlot(object):
+
def __init__(self, data, style):
self._lpstyle = style
self._linename = data[0]
@@ -485,9 +513,11 @@ class eLinePlot(object):
for i in range(line_cnts):
styleIndex = i % sytle_cnts
lp.lines[i].strokeColor = self._lpstyle.linestyle[styleIndex][0]
- lp.lines[i].symbol = makeMarker(self._lpstyle.linestyle[styleIndex][1])
+ lp.lines[i].symbol = makeMarker(
+ self._lpstyle.linestyle[styleIndex][1])
lp.lines[i].strokeWidth = self._lpstyle.linestyle[styleIndex][2]
- color_paris.append((self._lpstyle.linestyle[styleIndex][0], self._linename[i]))
+ color_paris.append(
+ (self._lpstyle.linestyle[styleIndex][0], self._linename[i]))
# lp.lineLabels[i].strokeColor = self._lpstyle.linestyle[styleIndex][0]
lp.lineLabelFormat = self._lpstyle.format[0]
@@ -501,8 +531,6 @@ class eLinePlot(object):
lp.yValueAxis.valueMin, lp.yValueAxis.valueMax, lp.yValueAxis.valueSteps = self._yvalue
-
-
# lp.xValueAxis.forceZero = 0
# lp.xValueAxis.avoidBoundFrac = 1
# lp.xValueAxis.tickDown = 3
@@ -540,6 +568,7 @@ class eLinePlot(object):
class eHorizontalLineChart(object):
+
def __init__(self, data, style):
self._lcstyle = style
if len(data) < 1:
@@ -630,9 +659,11 @@ class eHorizontalLineChart(object):
for i in range(line_cnts):
styleIndex = i % sytle_cnts
lc.lines[i].strokeColor = self._lcstyle.linestyle[styleIndex][0]
- lc.lines[i].symbol = makeMarker(self._lcstyle.linestyle[styleIndex][1])
+ lc.lines[i].symbol = makeMarker(
+ self._lcstyle.linestyle[styleIndex][1])
lc.lines[i].strokeWidth = self._lcstyle.linestyle[styleIndex][2]
- color_paris.append((self._lcstyle.linestyle[styleIndex][0], self._linename[i]))
+ color_paris.append(
+ (self._lcstyle.linestyle[styleIndex][0], self._linename[i]))
lc.lineLabels.fontSize = self._lcstyle.labelsfont - 2
@@ -660,6 +691,7 @@ class eHorizontalLineChart(object):
class eBarChartColumn(object):
+
def __init__(self, data, style):
self._bcstyle = style
if len(data) < 4:
@@ -702,7 +734,10 @@ class eBarChartColumn(object):
color_paris = []
for i in range(bar_cnt):
bc.bars[i].fillColor = self._bcstyle.pillarstyle[self._bar[i]][0]
- color_paris.append((self._bcstyle.pillarstyle[self._bar[i]][0], self._bar[i]))
+ color_paris.append(
+ (self._bcstyle.pillarstyle[
+ self._bar[i]][0],
+ self._bar[i]))
bc.fillColor = self._bcstyle.background
bc.barLabels.fontName = 'Helvetica'
@@ -761,6 +796,7 @@ class eBarChartColumn(object):
class eParagraph(object):
+
def __init__(self, data, style):
self._pstyle = style
self._data = self.analysisData(data)
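The hunks above mostly reflow long calls into ReportLab's chart API (LinePlot, makeMarker, per-line strokeColor/strokeWidth). For readers unfamiliar with that API, here is a minimal, self-contained sketch of a line plot with markers; it uses only stock ReportLab classes and is not the eLinePlot wrapper being patched here.

    # Minimal ReportLab line plot illustrating the LinePlot/makeMarker calls
    # reformatted above. Stock ReportLab only; not the eLinePlot wrapper itself.
    from reportlab.graphics.shapes import Drawing
    from reportlab.graphics.charts.lineplots import LinePlot
    from reportlab.graphics.widgets.markers import makeMarker
    from reportlab.lib import colors
    from reportlab.graphics import renderPDF

    drawing = Drawing(400, 200)
    lp = LinePlot()
    lp.x, lp.y, lp.width, lp.height = 50, 50, 300, 125
    lp.data = [[(0, 10), (1, 30), (2, 20)],   # one list of (x, y) pairs per line
               [(0, 5), (1, 15), (2, 25)]]
    lp.lines[0].strokeColor = colors.blue     # per-line styling, as in the diff
    lp.lines[0].symbol = makeMarker('Circle')
    lp.lines[1].strokeColor = colors.green
    lp.lines[1].symbol = makeMarker('Square')
    drawing.add(lp)
    renderPDF.drawToFile(drawing, 'lineplot_demo.pdf')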
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdfcreator.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdfcreator.py
index c33974ec..67f988c9 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdfcreator.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdfcreator.py
@@ -22,6 +22,7 @@ LOG = logging.getLogger(__name__)
class PdfCreator(object):
+
def __init__(self, provider):
self._provider = provider
self._story = []
@@ -114,7 +115,10 @@ class PdfCreator(object):
def main():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/pdf-creator.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/pdf-creator.log",
+ clevel=logging.INFO)
out_file = "vstf_report.pdf"
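The change that recurs throughout this commit is wrapping long `setup_logging(...)` calls onto one argument per line. Judging only from the call sites visible in this diff, `vstf.common.log.setup_logging` takes a file log level, a log file path, and a console level (`clevel`); the sketch below is a hedged usage example based on that assumption, with a hypothetical log path.

    # Assumed from the call sites in this diff: setup_logging(level, log_file, clevel).
    import logging
    from vstf.common.log import setup_logging

    setup_logging(
        level=logging.DEBUG,                 # verbosity written to the log file
        log_file="/var/log/vstf/demo.log",   # hypothetical path, for illustration only
        clevel=logging.INFO)                 # verbosity echoed to the console

    LOG = logging.getLogger(__name__)
    LOG.info("logging configured")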
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdftemplate.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdftemplate.py
index 69c65401..7e287814 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdftemplate.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdftemplate.py
@@ -13,9 +13,11 @@ from reportlab.platypus.doctemplate import SimpleDocTemplate
from reportlab.platypus import PageBreak
from vstf.controller.reporters.report.pdf.styles import TemplateStyle, ps_head_lv1, ps_head_lv2, ps_head_lv3
import vstf.common.constants as cst
+from functools import reduce
class BaseDocTemplate(SimpleDocTemplate):
+
def __init__(self, filename, **kw):
self.allowSplitting = 0
SimpleDocTemplate.__init__(self, filename, **kw)
@@ -34,6 +36,7 @@ class BaseDocTemplate(SimpleDocTemplate):
class PdfTemplate(object):
+
def __init__(self, title, logo, header, footer, note=[], style="default"):
self._style = TemplateStyle(name=style)
self._title = title
@@ -41,7 +44,8 @@ class PdfTemplate(object):
#self._header = header[0]
self._footer = footer
self._note = note
- info = " Generated on %s " % time.strftime(cst.TIME_FORMAT2, time.localtime())
+ info = " Generated on %s " % time.strftime(
+ cst.TIME_FORMAT2, time.localtime())
self._note += [info]
def myFirstPage(self, canvas, doc):
@@ -54,46 +58,78 @@ class PdfTemplate(object):
sizes = (self._style.page_wight, self._style.page_height)
doc = BaseDocTemplate(output, pagesize=sizes)
# doc.build(story, onFirstPage=self.myFirstPage, onLaterPages=self.myLaterPages)
- doc.multiBuild(story, onFirstPage=self.myFirstPage, onLaterPages=self.myLaterPages)
+ doc.multiBuild(
+ story,
+ onFirstPage=self.myFirstPage,
+ onLaterPages=self.myLaterPages)
class PdfVswitch(PdfTemplate):
+
def myFirstPage(self, canvas, doc):
canvas.saveState()
title_lines = len(self._title)
line_size = [self._style.title_size] * title_lines
line_size.append(0)
- canvas.drawImage(self._logo,
- (self._style.page_wight - self._style.logo_width) / 2.0,
- self._style.page_height / 2.0 + (1 + self._style.title_leading) * reduce(lambda x, y: x + y,
- line_size),
- self._style.logo_width,
- self._style.logo_height
- )
+ canvas.drawImage(
+ self._logo,
+ (self._style.page_wight -
+ self._style.logo_width) /
+ 2.0,
+ self._style.page_height /
+ 2.0 +
+ (
+ 1 +
+ self._style.title_leading) *
+ reduce(
+ lambda x,
+ y: x +
+ y,
+ line_size),
+ self._style.logo_width,
+ self._style.logo_height)
for i in range(title_lines):
canvas.setFont(self._style.title_font, line_size[i])
- canvas.drawCentredString(self._style.page_wight / 2.0,
- self._style.page_height / 2.0 + (1 + self._style.title_leading) * reduce(
- lambda x, y: x + y, line_size[i + 1:]),
- self._title[i]
- )
+ canvas.drawCentredString(
+ self._style.page_wight /
+ 2.0,
+ self._style.page_height /
+ 2.0 +
+ (
+ 1 +
+ self._style.title_leading) *
+ reduce(
+ lambda x,
+ y: x +
+ y,
+ line_size[
+ i +
+ 1:]),
+ self._title[i])
size = self._style.body_size
canvas.setFont(self._style.body_font, size)
note_line = len(self._note)
for i in range(note_line):
print self._note[i]
- canvas.drawCentredString(self._style.page_wight / 2.0,
- self._style.page_height / 5.0 + (1 + self._style.body_leading) * size * (
- note_line - i - 1),
- self._note[i]
- )
+ canvas.drawCentredString(self._style.page_wight /
+ 2.0, self._style.page_height /
+ 5.0 +
+ (1 +
+ self._style.body_leading) *
+ size *
+ (note_line -
+ i -
+ 1), self._note[i])
size = self._style.body_size - 2
canvas.setFont(self._style.body_font, size)
- canvas.drawCentredString(self._style.page_wight / 2.0,
- self._style.page_bottom / 2.0 + (1 + self._style.body_leading) * size,
- self._footer[0])
+ canvas.drawCentredString(self._style.page_wight /
+ 2.0, self._style.page_bottom /
+ 2.0 +
+ (1 +
+ self._style.body_leading) *
+ size, self._footer[0])
canvas.restoreState()
def myLaterPages(self, canvas, doc):
@@ -106,9 +142,7 @@ class PdfVswitch(PdfTemplate):
)
size = self._style.body_size - 2
canvas.setFont(self._style.body_font, size)
- canvas.drawCentredString(self._style.page_wight / 2.0,
- self._style.page_bottom - 24,
- "%s%s Page %2d " % (self._footer[0], " " * 8, doc.page - 1)
- )
+ canvas.drawCentredString(
+ self._style.page_wight / 2.0, self._style.page_bottom - 24, "%s%s Page %2d " %
+ (self._footer[0], " " * 8, doc.page - 1))
canvas.restoreState()
-
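Besides the re-indentation, this file gains `from functools import reduce`. The `reduce` builtin was removed from the global namespace in Python 3, so code such as the title-placement arithmetic above must import it explicitly; for a plain sum of sizes, the builtin `sum` is an equivalent and simpler form. A short sketch:

    # Python 3: reduce must come from functools (it is no longer a builtin).
    from functools import reduce

    line_size = [24, 18, 14, 0]                    # per-line title sizes, as above
    total = reduce(lambda x, y: x + y, line_size)  # 56
    assert total == sum(line_size)                 # sum() is the simpler equivalent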
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/story.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/story.py
index 940c20fb..f1442fe7 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/story.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/story.py
@@ -21,6 +21,7 @@ from element import *
class Story(object):
+
def __init__(self):
self._storylist = []
@@ -30,6 +31,7 @@ class Story(object):
class StoryDecorator(Story):
+
def __init__(self, story, data=None, style=None):
self._story = story
self._data = data
@@ -47,6 +49,7 @@ class StoryDecorator(Story):
class ImageStory(StoryDecorator):
+
def new_story(self):
print "Image Story"
for filename in self._data:
@@ -60,7 +63,12 @@ class ImageStory(StoryDecorator):
image_hAlign = style.image_hAlign
image_vAlign = style.image_vAlign
self._story.storylist.append(
- eImage(filename, image_width, image_height, hAlign=image_hAlign, vAlign=image_vAlign))
+ eImage(
+ filename,
+ image_width,
+ image_height,
+ hAlign=image_hAlign,
+ vAlign=image_vAlign))
else:
style = is_default
image_height = style.image_height
@@ -69,22 +77,30 @@ class ImageStory(StoryDecorator):
image_vAlign = style.image_vAlign
# self._story.storylist.append(eGraphicsTable([[' ' * 5, eImage(filename, image_width, image_height, hAlign=image_hAlign, vAlign=image_vAlign)]], ts_left).table)
self._story.storylist.append(
- eImage(filename, image_width, image_height, hAlign=image_hAlign, vAlign=image_vAlign))
+ eImage(
+ filename,
+ image_width,
+ image_height,
+ hAlign=image_hAlign,
+ vAlign=image_vAlign))
class HeaderStory(StoryDecorator):
+
def new_story(self):
print "header story"
self._story.storylist.append(PageBreak())
class PageBreakStory(StoryDecorator):
+
def new_story(self):
print "PageBreak story"
self._story.storylist.append(PageBreak())
class TableOfContentsStory(StoryDecorator):
+
def new_story(self):
print "TableOfContents story"
self._data = [" ", " ", "Table Of Contents", ""]
@@ -96,35 +112,43 @@ class TableOfContentsStory(StoryDecorator):
class SpaceStory(StoryDecorator):
+
def new_story(self):
style = ps_space
self._story.storylist.append(eParagraph([" ", " "], style).para)
class TableStory(StoryDecorator):
+
def new_story(self):
print "table story"
style = ts_default
if self._style == 1:
self._story.storylist.append(eDataTable(self._data, style).table)
- elif self._style ==2:
+ elif self._style == 2:
style = ts_left
self._story.storylist.append(eCommonTable(self._data, style).table)
elif self._style == 3:
self._story.storylist.append(eConfigTable(self._data, style).table)
elif self._style == 4:
- self._story.storylist.append(eOptionsTable(self._data, style).table)
+ self._story.storylist.append(
+ eOptionsTable(self._data, style).table)
elif self._style == 5:
- self._story.storylist.append(eProfileTable(self._data, style).table)
+ self._story.storylist.append(
+ eProfileTable(self._data, style).table)
elif self._style == 6:
- self._story.storylist.append(eSummaryTable(self._data, style).table)
+ self._story.storylist.append(
+ eSummaryTable(self._data, style).table)
elif self._style == 7:
- self._story.storylist.append(eScenarioTable(self._data, style).table)
+ self._story.storylist.append(
+ eScenarioTable(self._data, style).table)
elif self._style == 8:
- self._story.storylist.append(eGitInfoTable(self._data, style).table)
+ self._story.storylist.append(
+ eGitInfoTable(self._data, style).table)
class LinePlotStory(StoryDecorator):
+
def new_story(self):
print "LinePlot"
style = lps_default
@@ -137,18 +161,21 @@ class LinePlotStory(StoryDecorator):
class LineChartStory(StoryDecorator):
+
def new_story(self):
print "LineChartStory: "
style = lcs_default
if not self._data:
print "data error "
return
- data = eGraphicsTable([[eHorizontalLineChart(self._data, style).draw]]).table
+ data = eGraphicsTable(
+ [[eHorizontalLineChart(self._data, style).draw]]).table
if data:
self._story.storylist.append(data)
class BarChartStory(StoryDecorator):
+
def new_story(self):
print "BarChartStory: "
style = bcs_default
@@ -156,12 +183,14 @@ class BarChartStory(StoryDecorator):
print "data error "
return
- data = eGraphicsTable([[eBarChartColumn(self._data, style).draw]]).table
+ data = eGraphicsTable(
+ [[eBarChartColumn(self._data, style).draw]]).table
if data:
self._story.storylist.append(data)
class ParagraphStory(StoryDecorator):
+
def new_story(self):
print "Paragraph Story"
style = ps_body
@@ -174,6 +203,7 @@ class ParagraphStory(StoryDecorator):
class TitleStory(StoryDecorator):
+
def new_story(self):
print "Paragraph Story"
if self._style - 1 in range(9):
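story.py wraps a shared Story (a list of ReportLab flowables) in a chain of StoryDecorator subclasses, each of whose `new_story` appends its own flowable (an image, a table, a PageBreak, ...). The stripped-down sketch below shows that pattern without the concrete eImage/eDataTable helpers; it assumes `new_story()` is driven from the decorator constructor, which the truncated hunk here does not confirm.

    # Stripped-down Story/StoryDecorator pattern as used in story.py (sketch).
    class Story(object):
        def __init__(self):
            self.storylist = []          # flowables eventually passed to doc.build()

    class StoryDecorator(Story):
        def __init__(self, story, data=None, style=None):
            self._story = story
            self._data = data
            self._style = style
            self.new_story()             # assumption: constructor triggers the append

        def new_story(self):
            pass                         # overridden by concrete decorators

    class PageBreakStory(StoryDecorator):
        def new_story(self):
            from reportlab.platypus import PageBreak
            self._story.storylist.append(PageBreak())

    story = Story()
    PageBreakStory(story)                # side effect: story.storylist gains a PageBreak
    print(len(story.storylist))          # 1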
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/html_provider.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/html_provider.py
index 74c4c593..2e0863ec 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/html_provider.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/html_provider.py
@@ -15,6 +15,7 @@ from vstf.controller.settings.template_settings import TemplateSettings
class HtmlProvider(object):
+
def __init__(self, info, style):
self._info = info
self._style = style
@@ -32,7 +33,10 @@ class HtmlProvider(object):
def main():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/html-provder.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/html-provder.log",
+ clevel=logging.INFO)
html_settings = HtmlSettings()
LOG.info(html_settings.settings)
@@ -42,4 +46,4 @@ def main():
LOG.info(provider.get_context)
if __name__ == '__main__':
- main() \ No newline at end of file
+ main()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/pdf_provider.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/pdf_provider.py
index e1cb09ef..f7752016 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/pdf_provider.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/pdf_provider.py
@@ -15,6 +15,7 @@ from vstf.controller.settings.template_settings import TemplateSettings
class PdfProvider(object):
+
def __init__(self, info):
self._info = info
@@ -37,7 +38,10 @@ class PdfProvider(object):
def main():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/pdf-provider.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/pdf-provider.log",
+ clevel=logging.INFO)
info = TemplateSettings()
provider = PdfProvider(info.settings)
@@ -46,4 +50,4 @@ def main():
LOG.info(provider.get_context)
if __name__ == '__main__':
- main() \ No newline at end of file
+ main()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/reporter.py b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/reporter.py
index 654c9b83..ea0a1ad0 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/reporters/reporter.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/reporters/reporter.py
@@ -30,6 +30,7 @@ LOG = logging.getLogger(__name__)
class Report(object):
+
def __init__(self, dbase, rpath):
"""
@@ -47,7 +48,10 @@ class Report(object):
creator = CandyGenerator(task)
attach_list = []
for scenario in scenario_list:
- out_file = os.path.join(self._rpath, "vstf_report_%s_%s.pdf" % (scenario, time.strftime(cst.TIME_FORMAT3)))
+ out_file = os.path.join(
+ self._rpath, "vstf_report_%s_%s.pdf" %
+ (scenario, time.strftime(
+ cst.TIME_FORMAT3)))
LOG.info(out_file)
creator.create(scenario)
info = TemplateSettings()
@@ -90,7 +94,10 @@ class Report(object):
def main():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-reporter.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-reporter.log",
+ clevel=logging.INFO)
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-rpath',
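The report path above is assembled from a scenario name and a `time.strftime(cst.TIME_FORMAT3)` timestamp. `TIME_FORMAT3` is not shown in this diff, so the sketch below substitutes an assumed `"%Y-%m-%d_%H-%M-%S"` format and a hypothetical output directory purely for illustration.

    # Hypothetical sketch of the timestamped report naming seen in reporter.py.
    # cst.TIME_FORMAT3 is not shown here; "%Y-%m-%d_%H-%M-%S" is an assumption.
    import os
    import time

    rpath = "/tmp/reports"               # hypothetical output directory
    scenario = "Tn"                      # illustrative scenario tag
    out_file = os.path.join(
        rpath, "vstf_report_%s_%s.pdf" %
        (scenario, time.strftime("%Y-%m-%d_%H-%M-%S")))
    print(out_file)  # e.g. /tmp/reports/vstf_report_Tn_2024-01-01_12-00-00.pdf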
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/cpu_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/cpu_settings.py
index a25af4c1..9589e119 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/cpu_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/cpu_settings.py
@@ -18,6 +18,7 @@ LOG = logging.getLogger(__name__)
class CpuSettings(sets.Settings):
+
def __init__(self, path="/etc/vstf/perf/",
filename="sw_perf.cpu-settings",
mode=sets.SETS_SINGLE):
@@ -32,9 +33,22 @@ class CpuSettings(sets.Settings):
for item in body:
item = item.encode()
func_name = "set_%s" % item
- setattr(self, func_name, self._setting_file(func_name, self._mset['affctl'], self._fset['affctl'], item))
+ setattr(
+ self,
+ func_name,
+ self._setting_file(
+ func_name,
+ self._mset['affctl'],
+ self._fset['affctl'],
+ item))
func_name = "mset_%s" % item
- setattr(self, func_name, self._setting_memory(func_name, self._mset['affctl'], item))
+ setattr(
+ self,
+ func_name,
+ self._setting_memory(
+ func_name,
+ self._mset['affctl'],
+ item))
LOG.debug(self.__dict__)
@@ -59,8 +73,10 @@ class CpuSettings(sets.Settings):
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-cpu-settings.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-cpu-settings.log",
+ clevel=logging.INFO)
if __name__ == '__main__':
unit_test()
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/device_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/device_settings.py
index 25f2c5bf..5fe3976f 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/device_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/device_settings.py
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
class DeviceSettings(sets.Settings):
+
def __init__(self, path="/etc/vstf/perf/",
filename="sw_perf.device-settings",
mode=sets.SETS_SINGLE):
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/flows_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/flows_settings.py
index 9cd1a1b7..f28d5b5b 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/flows_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/flows_settings.py
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
class FlowsSettings(sets.Settings):
+
def __init__(self, path="/etc/vstf/perf/",
filename="sw_perf.flownodes-settings",
mode=sets.SETS_SINGLE):
@@ -27,16 +28,44 @@ class FlowsSettings(sets.Settings):
for actor in self._check_actors:
actor = actor.encode()
func_name = "add_%s" % actor
- setattr(self, func_name, self._adding_file(func_name, self._mset, self._fset, actor, self._check_add))
+ setattr(
+ self,
+ func_name,
+ self._adding_file(
+ func_name,
+ self._mset,
+ self._fset,
+ actor,
+ self._check_add))
func_name = "madd_%s" % actor
- setattr(self, func_name, self._adding_memory(func_name, self._mset, actor, self._check_add))
+ setattr(
+ self,
+ func_name,
+ self._adding_memory(
+ func_name,
+ self._mset,
+ actor,
+ self._check_add))
for actor in self._nocheck_actors:
actor = actor.encode()
func_name = "add_%s" % actor
- setattr(self, func_name, self._adding_file(func_name, self._mset, self._fset, actor))
+ setattr(
+ self,
+ func_name,
+ self._adding_file(
+ func_name,
+ self._mset,
+ self._fset,
+ actor))
func_name = "madd_%s" % actor
- setattr(self, func_name, self._adding_memory(func_name, self._mset, actor))
+ setattr(
+ self,
+ func_name,
+ self._adding_memory(
+ func_name,
+ self._mset,
+ actor))
LOG.debug(self.__dict__.keys())
@@ -70,7 +99,10 @@ class FlowsSettings(sets.Settings):
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-flows-settings.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-flows-settings.log",
+ clevel=logging.INFO)
flows_settings = FlowsSettings()
LOG.info(flows_settings.settings)
@@ -113,7 +145,7 @@ def unit_test():
cpu = {
"agent": "192.168.188.16",
- "affctl":{
+ "affctl": {
"policy": 2
}
}
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/forwarding_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/forwarding_settings.py
index 636ddfda..138337cb 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/forwarding_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/forwarding_settings.py
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
class ForwardingSettings(sets.Settings):
+
def __init__(self, path="/etc/vstf/perf/",
filename="sw_perf.forwarding-settings",
mode=sets.SETS_SINGLE):
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/html_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/html_settings.py
index ce87733e..89af7a54 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/html_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/html_settings.py
@@ -15,13 +15,21 @@ LOG = logging.getLogger(__name__)
class HtmlSettings(sets.Settings):
- def __init__(self, path="/etc/vstf/", filename="reporters.html-settings", mode=sets.SETS_DEFAULT):
+
+ def __init__(
+ self,
+ path="/etc/vstf/",
+ filename="reporters.html-settings",
+ mode=sets.SETS_DEFAULT):
super(HtmlSettings, self).__init__(path, filename, mode)
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/html-settings.log", clevel=logging.DEBUG)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/html-settings.log",
+ clevel=logging.DEBUG)
html_settings = HtmlSettings()
style = {
'table': {
@@ -36,13 +44,13 @@ def unit_test():
'border': '1px solid green',
'padding': '8px',
'word-wrap': 'break-all'
- },
+ },
'th':
{
'background-color': '#EAF2D3',
'border': '1px solid green',
'padding': '8px'
- }
+ }
}
html_settings.set_style(style)
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/mail_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/mail_settings.py
index db01097f..967aa601 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/mail_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/mail_settings.py
@@ -18,7 +18,12 @@ LOG = logging.getLogger(__name__)
class MailSettings(sets.Settings):
- def __init__(self, path="/etc/vstf", filename="reporters.mail.mail-settings", mode=sets.SETS_DEFAULT):
+
+ def __init__(
+ self,
+ path="/etc/vstf",
+ filename="reporters.mail.mail-settings",
+ mode=sets.SETS_DEFAULT):
super(MailSettings, self).__init__(path, filename, mode)
def _register_func(self):
@@ -30,11 +35,24 @@ class MailSettings(sets.Settings):
for item in body:
item = item.encode()
func_name = "set_%s" % item
- setattr(self, func_name, self._setting_file(func_name, self._mset['body'], self._fset['body'], item))
+ setattr(
+ self,
+ func_name,
+ self._setting_file(
+ func_name,
+ self._mset['body'],
+ self._fset['body'],
+ item))
other = {"attach", "content", "subtype"}
for item in other:
func_name = "mset_%s" % item
- setattr(self, func_name, self._setting_memory(func_name, self._mset['body'], item))
+ setattr(
+ self,
+ func_name,
+ self._setting_memory(
+ func_name,
+ self._mset['body'],
+ item))
LOG.debug(self.__dict__)
@@ -80,7 +98,10 @@ class MailSettings(sets.Settings):
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-mail-settings.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-mail-settings.log",
+ clevel=logging.INFO)
mail_settings = MailSettings()
mail_settings.sinput()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/perf_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/perf_settings.py
index 610cb4ae..adc8dee4 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/perf_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/perf_settings.py
@@ -20,6 +20,7 @@ LOG = logging.getLogger(__name__)
class PerfSettings(sets.Settings):
+
def __init__(self, path="/etc/vstf/perf/",
filename="sw_perf.batch-settings",
mode=sets.SETS_SINGLE):
@@ -42,14 +43,23 @@ class PerfSettings(sets.Settings):
if not scenario:
LOG.warn("not support the case:%s", value["case"])
return
- self._adding_file("add", self._mset, self._fset, scenario, check=self._check_add)(value)
+ self._adding_file(
+ "add",
+ self._mset,
+ self._fset,
+ scenario,
+ check=self._check_add)(value)
def madd_case(self, case):
scenario = self.dbconn.query_scenario(case)
if not scenario:
LOG.warn("not support the case:%s", case)
return
- self._adding_memory("madd", self._mset, scenario, check=self._check_add)(case)
+ self._adding_memory(
+ "madd",
+ self._mset,
+ scenario,
+ check=self._check_add)(case)
@deco.dcheck('sizes')
@deco.dcheck("type", choices=cst.TTYPES)
@@ -74,7 +84,7 @@ class PerfSettings(sets.Settings):
pprint.pprint(self.settings)
print "+++++++++++++++++++++++++++++++++++"
return True
-
+
@deco.vstf_input('sizes', types=list)
@deco.vstf_input("type", types=str, choices=cst.TTYPES)
@deco.vstf_input("profile", types=str, choices=cst.PROVIDERS)
@@ -98,7 +108,10 @@ def unit_test():
perf_settings.sinput()
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-perf-settings.log", clevel=logging.DEBUG)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-perf-settings.log",
+ clevel=logging.DEBUG)
if __name__ == '__main__':
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/settings.py
index 2c712bb2..a01689d5 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/settings.py
@@ -31,7 +31,8 @@ def dict2object(dic):
module_name = dic.pop('__module__')
module = __import__(module_name)
class_ = getattr(module, class_name)
- args = dict((key.encode('ascii'), value) for key, value in dic.items()) # get args
+ args = dict((key.encode('ascii'), value)
+ for key, value in dic.items()) # get args
inst = class_(**args) # create new instance
else:
inst = dic
@@ -52,6 +53,7 @@ def filter_comments(filename, flags="//"):
class BaseSettings(object):
+
def _load(self, fullname):
data = filter_comments(fullname)
LOG.debug(fullname)
@@ -68,7 +70,11 @@ class BaseSettings(object):
for litem in ldata:
if rdata:
for ritem in rdata:
- if isinstance(litem, dict) or isinstance(litem, list):
+ if isinstance(
+ litem,
+ dict) or isinstance(
+ litem,
+ list):
tmp = self._sub(litem, ritem)
else:
tmp = ritem
@@ -104,15 +110,22 @@ class BaseSettings(object):
if os.path.exists(filename):
os.remove(filename)
with open(filename, 'w') as ofile:
- content = json.dumps(data, sort_keys=True, indent=4, separators=(',', ':'))
+ content = json.dumps(
+ data,
+ sort_keys=True,
+ indent=4,
+ separators=(
+ ',',
+ ':'))
ofile.write(content)
class DefaultSettings(BaseSettings):
+
def __init__(self, path):
self._default = os.path.join(path, 'default')
self._user = os.path.join(path, 'user')
-
+
def load(self, filename):
dfile = os.path.join(self._default, filename)
if os.path.exists(dfile):
@@ -137,6 +150,7 @@ class DefaultSettings(BaseSettings):
class SingleSettings(BaseSettings):
+
def __init__(self, path):
self._path = path
@@ -161,6 +175,7 @@ SETTINGS = [SETS_SINGLE, SETS_DEFAULT]
class Settings(object):
+
def __init__(self, path, filename, mode=SETS_SINGLE):
if mode not in SETTINGS:
raise Exception("error Settings mode : %s" % (mode))
@@ -257,23 +272,65 @@ class Settings(object):
for item in items:
item = item.encode()
func_name = "set_%s" % item
- setattr(self, func_name, self._setting_file(func_name, self._mset, self._fset, item))
+ setattr(
+ self,
+ func_name,
+ self._setting_file(
+ func_name,
+ self._mset,
+ self._fset,
+ item))
func_name = "mset_%s" % item
- setattr(self, func_name, self._setting_memory(func_name, self._mset, item))
+ setattr(
+ self,
+ func_name,
+ self._setting_memory(
+ func_name,
+ self._mset,
+ item))
elif isinstance(self._fset, list):
func_name = "set"
- setattr(self, func_name, self._setting_file(func_name, self._mset, self._fset, None))
+ setattr(
+ self,
+ func_name,
+ self._setting_file(
+ func_name,
+ self._mset,
+ self._fset,
+ None))
func_name = "mset"
- setattr(self, func_name, self._setting_memory(func_name, self._mset, None))
+ setattr(
+ self,
+ func_name,
+ self._setting_memory(
+ func_name,
+ self._mset,
+ None))
func_name = "add"
- setattr(self, func_name, self._adding_file(func_name, self._mset, self._fset, None))
+ setattr(
+ self,
+ func_name,
+ self._adding_file(
+ func_name,
+ self._mset,
+ self._fset,
+ None))
func_name = "madd"
- setattr(self, func_name, self._adding_memory(func_name, self._mset, None))
+ setattr(
+ self,
+ func_name,
+ self._adding_memory(
+ func_name,
+ self._mset,
+ None))
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf-settings.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf-settings.log",
+ clevel=logging.INFO)
path = '/etc/vstf'
setting = DefaultSettings(path)
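The bulk of this hunk re-wraps calls that register `set_*` / `mset_*` / `add_*` methods at runtime with `setattr`, binding each generated name to a closure produced by `_setting_file` or `_setting_memory`. Below is a minimal, self-contained sketch of that registration idiom, with a toy closure factory standing in for the real ones.

    # Minimal sketch of the setattr-based registration in Settings._register_func.
    # _make_setter is a toy stand-in for _setting_file/_setting_memory.
    class ToySettings(object):
        def __init__(self, items):
            self._data = {}
            for item in items:
                func_name = "set_%s" % item
                setattr(self, func_name, self._make_setter(item))

        def _make_setter(self, item):
            def setter(value):           # closure captures `item`
                self._data[item] = value
            return setter

    s = ToySettings(["title", "footer"])
    s.set_title("vstf report")           # generated at runtime by __init__
    s.set_footer("bottlenecks")
    print(s._data)                       # {'title': 'vstf report', 'footer': 'bottlenecks'}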
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/template_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/template_settings.py
index b677c539..2e449fef 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/template_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/template_settings.py
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
class TemplateSettings(sets.Settings):
+
def __init__(self, path="/etc/vstf/reporter/",
filename="reporters.template-settings",
mode=sets.SETS_SINGLE):
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/tester_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/tester_settings.py
index 554c8042..5d64d29c 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/tester_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/tester_settings.py
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
class TesterSettings(sets.Settings):
+
def __init__(self, path="/etc/vstf/env/",
filename="tester.json",
mode=sets.SETS_SINGLE):
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/settings/tool_settings.py b/testsuites/vstf/vstf_scripts/vstf/controller/settings/tool_settings.py
index a84bc59f..aed3306f 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/settings/tool_settings.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/settings/tool_settings.py
@@ -18,7 +18,12 @@ LOG = logging.getLogger(__name__)
class ToolSettings(sets.Settings):
- def __init__(self, path="/etc/vstf", filename="sw_perf.tool-settings", mode=sets.SETS_DEFAULT):
+
+ def __init__(
+ self,
+ path="/etc/vstf",
+ filename="sw_perf.tool-settings",
+ mode=sets.SETS_DEFAULT):
super(ToolSettings, self).__init__(path, filename, mode)
def _register_func(self):
@@ -29,8 +34,15 @@ class ToolSettings(sets.Settings):
for item in body:
item = item.encode()
func_name = "set_%s" % (item)
- setattr(self, func_name,
- self._setting_file(func_name, self._mset, self._fset, item, check=self._check_keys))
+ setattr(
+ self,
+ func_name,
+ self._setting_file(
+ func_name,
+ self._mset,
+ self._fset,
+ item,
+ check=self._check_keys))
def _check_keys(self, value):
keys = ['threads', 'wait', 'time']
@@ -70,7 +82,10 @@ class ToolSettings(sets.Settings):
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/tool-settings.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/tool-settings.log",
+ clevel=logging.INFO)
tool_settings = ToolSettings()
value = {
"time": 10,
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/spirent/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/spirent/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/spirent/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/spirent/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/spirent/appliance.py b/testsuites/vstf/vstf_scripts/vstf/controller/spirent/appliance.py
index d4c5be64..610b27dc 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/spirent/appliance.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/spirent/appliance.py
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
class spirentSTC(object):
+
def __init__(self):
super(spirentSTC, self).__init__()
self.runmodel = None
@@ -25,7 +26,7 @@ class spirentSTC(object):
:param str conner: the spirent tester, the agent id of spirent vm
:param list measurand: the tested host's agent id
:param str model: the model used of the tested host
-
+
"""
mgr = stevedore.driver.DriverManager(namespace="spirent.model.plugins",
name=model,
@@ -41,7 +42,7 @@ class spirentSTC(object):
def run(config):
- # test option parser
+ # test option parser
if not os.path.exists(config['configfile']):
LOG.error('The config file %s does exist.', config.get("configfile"))
return False
@@ -54,7 +55,7 @@ def run(config):
LOG.error("[ERROR]Check parameter invalid.")
return False
- # check logical parameter in the
+ # check logical parameter in the
flag = runmodel.check_logic_invalid
if not flag:
LOG.error("[ERROR]Check logic parameter with host invalid.")
@@ -86,7 +87,8 @@ def run(config):
LOG.error("[ERROR]Restructure the test data failed.")
perfdata = getResult(result_dict)
columndata = getResultColumn(result_dict)
- column_array, data_array = analysis_instance.analyseResult(suite, columndata, perfdata)
+ column_array, data_array = analysis_instance.analyseResult(
+ suite, columndata, perfdata)
temp = {'columns': column_array, 'data': data_array}
result[suite] = temp
return result
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/model.py b/testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/model.py
index a29794f4..38bfa70e 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/model.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/model.py
@@ -24,6 +24,7 @@ reverse_dict = {
class BaseModel(object):
+
def __init__(self, config):
self.config = config
@@ -117,6 +118,7 @@ def _tranfer_array_to_range(array):
class TnV(BaseModel):
+
def __init__(self, config):
super(TnV, self).__init__(config)
self.config = config
@@ -178,7 +180,8 @@ class TnV(BaseModel):
info = self.handle.get(option, 'macs')
macs = info.split()
if len(macs) != int(self.config['virtenv']) or macs == []:
- print("[ERROR]The macs number is not equal to vms or containers.")
+ print(
+ "[ERROR]The macs number is not equal to vms or containers.")
return False
for mac in macs:
# check mac valid
@@ -211,12 +214,12 @@ class TnV(BaseModel):
@property
def check_logic_invalid(self):
return self.flow_match() and self.match_virt_env() and \
- self.match_flows_and_nic and self.check_mac_valid() and \
- self.check_vlan_valid()
+ self.match_flows_and_nic and self.check_mac_valid() and \
+ self.check_vlan_valid()
@property
def read_flow_init(self):
- # The
+ # The
temp_flow = {}
src_macs = self._get_range('send', 'macs')
dst_macs = self._get_range('recv', 'macs')
@@ -233,7 +236,7 @@ class TnV(BaseModel):
temp_flow['tester_ip'] = self._get_nic_from_file('common', 'tester_ip')
vlan = src_vlan
avg_flow = int(self.config['flows']) / int(self.config['virtenv'])
- # build the main dictionary
+ # build the main dictionary
for _direct in sorted(fwd[self.config['direct']]):
i = 0
j = 0
@@ -267,9 +270,11 @@ class TnV(BaseModel):
temp_flow['qemu_thread_list'] = _vm_info['qemu_thread']
forward_core = {
- "forward": _vm_info['qemu_thread'][_queue + avg_flow * vm_index],
- "reverse": _vm_info['qemu_thread'][_queue + avg_flow * vm_index + int(self.config['flows'])]
- }
+ "forward": _vm_info['qemu_thread'][
+ _queue + avg_flow * vm_index],
+ "reverse": _vm_info['qemu_thread'][
+ _queue + avg_flow * vm_index + int(
+ self.config['flows'])]}
temp_flow['fwd_thread'] = forward_core[_direct]
temp_flow['fwd_vhost'] = None
@@ -280,7 +285,8 @@ class TnV(BaseModel):
temp_flow['dst_nic'] = dst_nic
# above all
j += 1
- self.init_flows[_direct + '_' + _vm + '_' + str(_queue)] = copy.deepcopy(temp_flow)
+ self.init_flows[_direct + '_' + _vm + '_' +
+ str(_queue)] = copy.deepcopy(temp_flow)
i += 1
src_nic_irq, dst_nic_irq = dst_nic_irq, src_nic_irq
vlan = dst_vlan
@@ -323,30 +329,67 @@ class TnV(BaseModel):
try:
i += 1
thread_info = None
- self.mac_learning(self.init_flows[_direct + '_' + _vm + '_' + str(_queue)],
- self.init_flows[reverse_dict[_direct] + '_' + _vm + '_' + str(_queue)])
- streamblock = self.send_packet(self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
+ self.mac_learning(
+ self.init_flows[
+ _direct +
+ '_' +
+ _vm +
+ '_' +
+ str(_queue)],
+ self.init_flows[
+ reverse_dict[_direct] +
+ '_' +
+ _vm +
+ '_' +
+ str(_queue)])
+ streamblock = self.send_packet(
+ self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
time.sleep(1)
result, thread_info = self.catch_thread_info()
thread_info = eval(thread_info)
- self.stop_flow(streamblock, self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
+ self.stop_flow(
+ streamblock, self.init_flows[
+ _direct + '_' + _vm + '_' + str(_queue)])
time.sleep(1)
if not result:
print("[ERROR]Catch the thread info failed.")
break
except:
- print("[ERROR]send flow failed error or get host thread info failed.")
+ print(
+ "[ERROR]send flow failed error or get host thread info failed.")
# compare the got thread info to
- if check_dict(thread_info, self.init_flows[_direct + '_' + _vm + '_' + str(_queue)]):
- self.set_thread2flow(thread_info, self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
- print("[INFO]Flow %s_%s_%s : fwd_vhost %s src_recv_irq %s dst_send_irq %s"
- % (_direct, _vm, _queue, thread_info['fwd_vhost'], thread_info['src_recv_irq'],
- thread_info['dst_send_irq']))
- print("%s" % (self.init_flows[_direct + '_' + _vm + '_' + str(_queue)]))
+ if check_dict(
+ thread_info, self.init_flows[
+ _direct + '_' + _vm + '_' + str(_queue)]):
+ self.set_thread2flow(
+ thread_info, self.init_flows[
+ _direct + '_' + _vm + '_' + str(_queue)])
+ print(
+ "[INFO]Flow %s_%s_%s : fwd_vhost %s src_recv_irq %s dst_send_irq %s" %
+ (_direct,
+ _vm,
+ _queue,
+ thread_info['fwd_vhost'],
+ thread_info['src_recv_irq'],
+ thread_info['dst_send_irq']))
+ print(
+ "%s" %
+ (self.init_flows[
+ _direct +
+ '_' +
+ _vm +
+ '_' +
+ str(_queue)]))
break
else:
- dst_ip_update(self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
+ dst_ip_update(
+ self.init_flows[
+ _direct +
+ '_' +
+ _vm +
+ '_' +
+ str(_queue)])
return self.init_flows
def affinity_bind(self, aff_strategy):
@@ -361,7 +404,8 @@ class TnV(BaseModel):
# recognize the thread id
for flowname in sorted(self.init_flows.keys()):
tmp_thread = self.init_flows[flowname]['fwd_thread']
- qemu_other = qemu_other + copy.deepcopy(self.init_flows[flowname]['qemu_thread_list'])
+ qemu_other = qemu_other + \
+ copy.deepcopy(self.init_flows[flowname]['qemu_thread_list'])
qemu_list.append(tmp_thread)
if self.init_flows[flowname]['direct'] == 'forward':
dst_vhost.append(self.init_flows[flowname]['fwd_vhost'])
@@ -386,16 +430,38 @@ class TnV(BaseModel):
handle = ConfigParser.ConfigParser()
handle.read(self.config['strategyfile'])
try:
- qemu_numa = handle.get('strategy' + self.config['strategy'], 'qemu_numa')
- src_vhost_numa = handle.get('strategy' + self.config['strategy'], 'src_vhost_numa')
- dst_vhost_numa = handle.get('strategy' + self.config['strategy'], 'dst_vhost_numa')
- src_irq_numa = handle.get('strategy' + self.config['strategy'], 'src_irq_numa')
- dst_irq_numa = handle.get('strategy' + self.config['strategy'], 'dst_irq_numa')
- loan_numa = handle.get('strategy' + self.config['strategy'], 'loan_numa')
+ qemu_numa = handle.get(
+ 'strategy' +
+ self.config['strategy'],
+ 'qemu_numa')
+ src_vhost_numa = handle.get(
+ 'strategy' + self.config['strategy'],
+ 'src_vhost_numa')
+ dst_vhost_numa = handle.get(
+ 'strategy' + self.config['strategy'],
+ 'dst_vhost_numa')
+ src_irq_numa = handle.get(
+ 'strategy' +
+ self.config['strategy'],
+ 'src_irq_numa')
+ dst_irq_numa = handle.get(
+ 'strategy' +
+ self.config['strategy'],
+ 'dst_irq_numa')
+ loan_numa = handle.get(
+ 'strategy' +
+ self.config['strategy'],
+ 'loan_numa')
except:
print("[ERROR]Parse the strategy file failed or get the options failed.")
- for value in [qemu_numa, src_vhost_numa, dst_vhost_numa, src_irq_numa, dst_irq_numa, loan_numa]:
+ for value in [
+ qemu_numa,
+ src_vhost_numa,
+ dst_vhost_numa,
+ src_irq_numa,
+ dst_irq_numa,
+ loan_numa]:
if value is not None or value == '':
raise ValueError('some option in the strategy file is none.')
# cores mapping thread
@@ -407,26 +473,39 @@ class TnV(BaseModel):
for node in numa_topo.keys():
numa_topo[node]['process'] = []
if 'node' + src_irq_numa == node:
- numa_topo[node]['process'] = numa_topo[node]['process'] + src_irq
+ numa_topo[node]['process'] = numa_topo[
+ node]['process'] + src_irq
if 'node' + dst_irq_numa == node:
- numa_topo[node]['process'] = numa_topo[node]['process'] + dst_irq
+ numa_topo[node]['process'] = numa_topo[
+ node]['process'] + dst_irq
if 'node' + src_vhost_numa == node:
- numa_topo[node]['process'] = numa_topo[node]['process'] + src_vhost
+ numa_topo[node]['process'] = numa_topo[
+ node]['process'] + src_vhost
if 'node' + dst_vhost_numa == node:
- numa_topo[node]['process'] = numa_topo[node]['process'] + dst_vhost
+ numa_topo[node]['process'] = numa_topo[
+ node]['process'] + dst_vhost
if 'node' + qemu_numa == node:
- numa_topo[node]['process'] = numa_topo[node]['process'] + qemu_list
+ numa_topo[node]['process'] = numa_topo[
+ node]['process'] + qemu_list
loan_cores = ''
for node in numa_topo.keys():
- if len(numa_topo[node]['process']) > len(numa_topo[node]['phy_cores']):
+ if len(
+ numa_topo[node]['process']) > len(
+ numa_topo[node]['phy_cores']):
# length distance
- diff = len(numa_topo[node]['process']) - len(numa_topo[node]['phy_cores'])
+ diff = len(numa_topo[node]['process']) - \
+ len(numa_topo[node]['phy_cores'])
# first deep copy
- numa_topo['node' + loan_numa]['process'] = numa_topo['node' + loan_numa]['process'] + copy.deepcopy(
- numa_topo[node]['process'][-diff:])
- cores_str = _tranfer_array_to_range(numa_topo['node' + loan_numa]['phy_cores'][diff:])
+ numa_topo['node' + loan_numa]['process'] = numa_topo['node' + loan_numa][
+ 'process'] + copy.deepcopy(numa_topo[node]['process'][-diff:])
+ cores_str = _tranfer_array_to_range(
+ numa_topo[
+ 'node' +
+ loan_numa]['phy_cores'][
+ diff:])
loan_cores = ','.join([loan_cores, cores_str])
- numa_topo[node]['process'] = numa_topo[node]['process'][0:-diff]
+ numa_topo[node]['process'] = numa_topo[
+ node]['process'][0:-diff]
loan_cores = loan_cores[1:]
loan_bind_list = {}
for proc_loan in qemu_other:
@@ -435,7 +514,8 @@ class TnV(BaseModel):
bind_list = {}
for node in numa_topo.keys():
for i in range(len(numa_topo[node]['process'])):
- bind_list[numa_topo[node]['process'][i]] = str(numa_topo[node]['phy_cores'][i])
+ bind_list[numa_topo[node]['process'][i]] = str(
+ numa_topo[node]['phy_cores'][i])
bind_list.update(loan_bind_list)
for key in bind_list.keys():
self.host_instance.bind_cpu(bind_list[key], key)
@@ -459,8 +539,10 @@ class TnV(BaseModel):
if suite == "throughput":
print("[INFO]!!!!!!!!!!!!!!!Now begin to throughput test")
- ret, result = self.send_instace.run_rfc2544_throughput(forward_init_flows, reverse_init_flows)
+ ret, result = self.send_instace.run_rfc2544_throughput(
+ forward_init_flows, reverse_init_flows)
elif suite == "frameloss":
print("[INFO]!!!!!!!!!!!1!!!Now begin to frameloss test")
- ret, result = self.send_instace.run_rfc2544_frameloss(forward_init_flows, reverse_init_flows)
+ ret, result = self.send_instace.run_rfc2544_frameloss(
+ forward_init_flows, reverse_init_flows)
return ret, result
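The affinity code above pulls `qemu_numa`, `src_vhost_numa`, and related options out of a strategy file with ConfigParser, using the section name `'strategy' + self.config['strategy']`. The sketch below shows that lookup against an assumed INI layout (only the option names are visible in the diff); it uses the Python 3 `configparser` module, whereas the original imports the Python 2 `ConfigParser`.

    # Sketch of the strategy-file lookup in TnV.affinity_bind. The INI layout is
    # assumed from the option names in the diff; the real code reads a file path.
    import configparser
    import textwrap

    SAMPLE = textwrap.dedent("""\
        [strategy1]
        qemu_numa = 0
        src_vhost_numa = 0
        dst_vhost_numa = 1
        src_irq_numa = 0
        dst_irq_numa = 1
        loan_numa = 1
        """)

    handle = configparser.ConfigParser()
    handle.read_string(SAMPLE)           # original: handle.read(config['strategyfile'])
    strategy = "1"
    print(handle.get("strategy" + strategy, "qemu_numa"))  # '0' -- options are strings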
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/result_analysis.py b/testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/result_analysis.py
index b09a846d..9003f063 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/result_analysis.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/result_analysis.py
@@ -29,9 +29,11 @@ def restrucData(data_string):
try:
data_dict = {}
p = re.compile('-Columns.*-Output')
- data_dict['Columns'] = p.findall(data_string)[0].strip('-Columns {} -Output')
+ data_dict['Columns'] = p.findall(
+ data_string)[0].strip('-Columns {} -Output')
p = re.compile('-Output.*-State')
- data_dict['Output'] = p.findall(data_string)[0].strip('-Output {} -State')
+ data_dict['Output'] = p.findall(
+ data_string)[0].strip('-Output {} -State')
if data_dict['Columns'] is not None or data_dict['Output'] is not None:
return False, None
return True, data_dict
@@ -90,6 +92,7 @@ def framelossData(column, perfdata):
class analysis(object):
+
def __init__(self):
pass
@@ -148,7 +151,8 @@ class analysis(object):
line[column_name_dict['FrameSize']],
line[column_name_dict['Load(%)']],
line[column_name_dict['Result']],
- str(float(line[column_name_dict['ForwardingRate(mpps)']]) / 1000000),
+ str(float(line[column_name_dict[
+ 'ForwardingRate(mpps)']]) / 1000000),
line[column_name_dict['TxFrameCount']],
line[column_name_dict['RxFrameCount']],
line[column_name_dict['AverageLatency(us)']],
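One detail worth knowing when reading the `restrucData` hunk: `str.strip('-Columns {} -Output')` does not remove those substrings, it strips any run of the listed characters from both ends, which works only as long as the payload's edge characters are not in that set. A two-line illustration:

    # str.strip(chars) treats its argument as a set of characters, not a prefix/suffix.
    s = "-Columns {a b c} -Output"
    print(s.strip('-Columns {} -Output'))        # 'a b c'
    print("Columns".strip('-Columns {} -Output'))  # '' -- every character is in the set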
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/flow_producer.py b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/flow_producer.py
index 4a3b02c2..a4bd1467 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/flow_producer.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/flow_producer.py
@@ -21,6 +21,7 @@ LOG = logging.getLogger(__name__)
class FlowsProducer(object):
+
def __init__(self, conn, flows_settings):
self._perf = flows_settings
self._forwarding = ForwardingSettings().settings
@@ -43,12 +44,13 @@ class FlowsProducer(object):
raise Exception("error devs :%s", devs)
LOG.info(agent)
LOG.info(name)
- if not self._devs_map.has_key((agent, name)):
+ if (agent, name) not in self._devs_map:
query = Fabricant(agent, self._conn)
query.clean_all_namespace()
dev_info = query.get_device_verbose(identity=name)
if not isinstance(dev_info, dict):
- err = "get device detail failed, agent:%s net:%s" % (agent, name)
+ err = "get device detail failed, agent:%s net:%s" % (
+ agent, name)
raise Exception(err)
dev = {
"agent": agent,
@@ -127,7 +129,10 @@ class FlowsProducer(object):
def unit_test():
from vstf.rpc_frame_work.rpc_producer import Server
from vstf.common.log import setup_logging
- setup_logging(level=logging.INFO, log_file="/var/log/vstf/vstf-producer.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.INFO,
+ log_file="/var/log/vstf/vstf-producer.log",
+ clevel=logging.INFO)
conn = Server("192.168.188.10")
flow_settings = FlowsSettings()
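Beyond the wrapping, this hunk replaces `self._devs_map.has_key((agent, name))` with the `in` operator. `dict.has_key` was removed in Python 3, and `key in d` is the idiomatic form in Python 2 as well:

    # dict.has_key() is Python 2 only; the `in` operator works in both 2 and 3.
    devs_map = {}
    key = ("agent-192.168.188.14", "eth4")   # (agent, name); values are illustrative
    if key not in devs_map:                  # old form: if not devs_map.has_key(key):
        devs_map[key] = {"agent": key[0], "iface": key[1]}
    print(key in devs_map)                   # True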
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/model.py b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/model.py
index c49df041..8e3e7b22 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/model.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/model.py
@@ -17,6 +17,7 @@ LOG = logging.getLogger(__name__)
class NetDeviceMgr(Fabricant):
+
@classmethod
def add(cls, dst, conn, dev):
self = cls(dst, conn)
@@ -38,6 +39,7 @@ class NetDeviceMgr(Fabricant):
class Actor(Fabricant):
+
def __init__(self, dst, conn, tool, params):
super(Actor, self).__init__(dst, conn)
self._tool = tool
@@ -46,12 +48,13 @@ class Actor(Fabricant):
def __repr__(self):
repr_dict = self.__dict__
- repr_keys = list(repr_dict.keys())
- repr_keys.sort()
- return '%s(%s)' % (self.__class__.__name__, ', '.join(['%s=%r' % (k, repr_dict[k]) for k in repr_keys]))
+ repr_keys = sorted(repr_dict.keys())
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(
+ ['%s=%r' % (k, repr_dict[k]) for k in repr_keys]))
class Sender(Actor):
+
def start(self, pktsize, **kwargs):
LOG.info("Sender.start")
if 'ratep' in kwargs and kwargs['ratep']:
@@ -106,6 +109,7 @@ class Sender(Actor):
class Receiver(Actor):
+
def start(self, **kwargs):
LOG.info("Receiver.start")
ret, info = self.perf_run(
@@ -136,6 +140,7 @@ class Receiver(Actor):
class NicWatcher(Fabricant):
+
def __init__(self, dst, conn, params):
super(NicWatcher, self).__init__(dst, conn)
self._params = params
@@ -144,7 +149,9 @@ class NicWatcher(Fabricant):
def start(self):
print "NicWatcher.start"
- self._pid = self.run_vnstat(device=self._params["iface"], namespace=self._params["namespace"])
+ self._pid = self.run_vnstat(
+ device=self._params["iface"],
+ namespace=self._params["namespace"])
print self._pid
def stop(self):
@@ -161,6 +168,7 @@ class NicWatcher(Fabricant):
class CpuWatcher(Fabricant):
+
def __init__(self, dst, conn):
super(CpuWatcher, self).__init__(dst, conn)
self._pid = None
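`Actor.__repr__` above switches from `keys = list(...); keys.sort()` to `sorted(...)`. `sorted()` returns a new list and can be used inline, whereas `list.sort()` sorts in place and returns None, which is why the two-step form was needed before. A minimal reconstruction of the resulting deterministic repr:

    # sorted() returns a new list; list.sort() mutates in place and returns None.
    class Actor(object):
        def __init__(self, **params):
            self.__dict__.update(params)

        def __repr__(self):
            keys = sorted(self.__dict__.keys())   # stable, alphabetical field order
            return '%s(%s)' % (self.__class__.__name__,
                               ', '.join('%s=%r' % (k, self.__dict__[k]) for k in keys))

    print(Actor(tool="pktgen", dst="host1"))
    # Actor(dst='host1', tool='pktgen')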
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/perf_provider.py b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/perf_provider.py
index 396e6eef..3fdbad60 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/perf_provider.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/perf_provider.py
@@ -26,6 +26,7 @@ def get_agent_dict(nodes):
class PerfProvider(object):
+
def __init__(self, flows_info, tool_info, tester_info):
self._flows_info = flows_info
self._tool_info = tool_info
@@ -33,7 +34,8 @@ class PerfProvider(object):
def _islation(self):
flows = self._flows_info["flows"]
- if flows == 2 and self._flows_info["senders"][0]["agent"] == self._flows_info["senders"][1]["agent"]:
+ if flows == 2 and self._flows_info["senders"][0][
+ "agent"] == self._flows_info["senders"][1]["agent"]:
return True
return False
@@ -53,8 +55,10 @@ class PerfProvider(object):
}
}
for i in range(flows):
- sender['params']['src'].append(self._flows_info["senders"][i]['dev'])
- sender['params']['dst'].append(self._flows_info["receivers"][i]['dev'])
+ sender['params']['src'].append(
+ self._flows_info["senders"][i]['dev'])
+ sender['params']['dst'].append(
+ self._flows_info["receivers"][i]['dev'])
result.append(sender)
else:
for i in range(flows):
@@ -63,12 +67,12 @@ class PerfProvider(object):
"params": {
"protocol": protocol,
"namespace": None if "netmap" == tool else self._flows_info["senders"][i]['dev']['namespace'],
- "src": [self._flows_info["senders"][i]['dev']],
- "dst": [self._flows_info["receivers"][i]['dev']],
+ "src": [
+ self._flows_info["senders"][i]['dev']],
+ "dst": [
+ self._flows_info["receivers"][i]['dev']],
"time": self._tool_info[tool]["time"],
- "threads": self._tool_info[tool]["threads"]
- }
- }
+ "threads": self._tool_info[tool]["threads"]}}
result.append(sender)
return result
@@ -91,9 +95,8 @@ class PerfProvider(object):
"params": {
"namespace": None if "netmap" == tool else self._flows_info["receivers"][i]['dev']['namespace'],
"protocol": protocol,
- "dst": [self._flows_info["receivers"][i]['dev']]
- }
- }
+ "dst": [
+ self._flows_info["receivers"][i]['dev']]}}
result.append(receiver)
return result
@@ -104,9 +107,10 @@ class PerfProvider(object):
"agent": watcher["agent"],
"params": {
"iface": watcher['dev']["iface"],
- "namespace": None if tool in ["pktgen", "netmap"] else watcher['dev']["namespace"],
- }
- }
+ "namespace": None if tool in [
+ "pktgen",
+ "netmap"] else watcher['dev']["namespace"],
+ }}
result.append(node)
return result
@@ -118,10 +122,12 @@ class PerfProvider(object):
"agent": watcher["agent"],
"params": {
"iface": watcher['dev']["iface"],
- "namespace": watcher['dev']["namespace"] if tool not in ["pktgen", "netmap"] else None,
- "ip": watcher['dev']["ip"] + '/24',
- }
- }
+ "namespace": watcher['dev']["namespace"] if tool not in [
+ "pktgen",
+ "netmap"] else None,
+ "ip": watcher['dev']["ip"] +
+ '/24',
+ }}
result.append(node)
return result
@@ -176,7 +182,10 @@ class PerfProvider(object):
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-perf-provider.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-perf-provider.log",
+ clevel=logging.INFO)
from vstf.controller.settings.flows_settings import FlowsSettings
from vstf.controller.settings.tool_settings import ToolSettings
@@ -186,7 +195,10 @@ def unit_test():
tool_settings = ToolSettings()
tester_settings = TesterSettings()
- provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+ provider = PerfProvider(
+ flows_settings.settings,
+ tool_settings.settings,
+ tester_settings.settings)
tools = ['pktgen']
protocols = ['udp_bw', 'udp_lat']
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/performance.py b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/performance.py
index 3fe91e93..7dc426f8 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/performance.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/performance.py
@@ -29,6 +29,7 @@ LOG = logging.getLogger(__name__)
class Performance(object):
+
def __init__(self, conn, provider):
self._provider = provider
self._conn = conn
@@ -221,7 +222,8 @@ class Performance(object):
lat_tool = "qperf"
lat_type = 'latency'
lat_tpro = protocol + '_lat'
- self.run_latency_test(lat_tool, lat_tpro, size, ratep=realspeed)
+ self.run_latency_test(
+ lat_tool, lat_tpro, size, ratep=realspeed)
lat_result = self.result(tool, lat_type)
LOG.info(bw_result)
LOG.info(lat_result)
@@ -272,23 +274,32 @@ class Performance(object):
record[mark.txMbps] += nic_data['txmB/s'] * 8
if record[mark.rxMbps] > record[mark.txMbps]:
- record[mark.rxMbps], record[mark.txMbps] = record[mark.txMbps], record[mark.rxMbps]
+ record[
+ mark.rxMbps], record[
+ mark.txMbps] = record[
+ mark.txMbps], record[
+ mark.rxMbps]
if record[mark.rxCount] > record[mark.txCount]:
- record[mark.rxCount], record[mark.txCount] = record[mark.txCount], record[mark.rxCount]
+ record[
+ mark.rxCount], record[
+ mark.txCount] = record[
+ mark.txCount], record[
+ mark.rxCount]
if record[mark.txCount]:
- record[mark.percentLoss] = round(100 * (1 - record[mark.rxCount] / record[mark.txCount]),
- cst.PKTLOSS_ROUND)
+ record[mark.percentLoss] = round(
+ 100 * (1 - record[mark.rxCount] / record[mark.txCount]), cst.PKTLOSS_ROUND)
else:
record[mark.percentLoss] = 100
record[mark.bandwidth] /= 1000000.0
if cpu_mhz and record[mark.cpu]:
- record[mark.mppsGhz] = round(record[mark.bandwidth] / (record[mark.cpu] * cpu_mhz / 100000),
- cst.CPU_USAGE_ROUND)
+ record[mark.mppsGhz] = round(
+ record[mark.bandwidth] / (record[mark.cpu] * cpu_mhz / 100000), cst.CPU_USAGE_ROUND)
- record[mark.bandwidth] = round(record[mark.bandwidth], cst.RATEP_ROUND)
+ record[mark.bandwidth] = round(
+ record[mark.bandwidth], cst.RATEP_ROUND)
elif ttype in {'latency'}:
record = {
@@ -319,7 +330,10 @@ class Performance(object):
def unit_test():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-sw_perf.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-sw_perf.log",
+ clevel=logging.INFO)
conn = Server("192.168.188.10")
perf_settings = PerfSettings()
@@ -327,7 +341,10 @@ def unit_test():
tool_settings = ToolSettings()
tester_settings = TesterSettings()
flow_producer = FlowsProducer(conn, flows_settings)
- provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+ provider = PerfProvider(
+ flows_settings.settings,
+ tool_settings.settings,
+ tester_settings.settings)
perf = Performance(conn, provider)
tests = perf_settings.settings
for scenario, cases in tests.items():
@@ -348,7 +365,10 @@ def unit_test():
def main():
from vstf.common.log import setup_logging
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-performance.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-performance.log",
+ clevel=logging.INFO)
from vstf.controller.database.dbinterface import DbManage
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("case",
@@ -374,9 +394,10 @@ def main():
action="store",
default="64",
help='test size list "64 128"')
- parser.add_argument("--affctl",
- action="store_true",
- help="when input '--affctl', the performance will do affctl before testing")
+ parser.add_argument(
+ "--affctl",
+ action="store_true",
+ help="when input '--affctl', the performance will do affctl before testing")
parser.add_argument("--monitor",
dest="monitor",
default="localhost",
@@ -399,7 +420,10 @@ def main():
tool_settings = ToolSettings()
tester_settings = TesterSettings()
flow_producer = FlowsProducer(conn, flows_settings)
- provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+ provider = PerfProvider(
+ flows_settings.settings,
+ tool_settings.settings,
+ tester_settings.settings)
perf = Performance(conn, provider)
scenario = db_mgr.query_scenario(casetag)
flow_producer.create(scenario, casetag)
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/raw_data.py b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/raw_data.py
index aefb863d..828981db 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/raw_data.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/raw_data.py
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
class RawDataProcess(object):
+
def __init__(self):
pass
@@ -24,7 +25,9 @@ class RawDataProcess(object):
buf = ' '.join(buf)
m = {}
digits = re.compile(r"\d{1,}\.?\d*")
- units = re.compile(r"(?:gib|mib|kib|kbit/s|gbit/s|mbit/s|p/s)", re.IGNORECASE | re.MULTILINE)
+ units = re.compile(
+ r"(?:gib|mib|kib|kbit/s|gbit/s|mbit/s|p/s)",
+ re.IGNORECASE | re.MULTILINE)
units_arr = units.findall(buf)
LOG.debug(units_arr)
digits_arr = digits.findall(buf)
@@ -96,9 +99,9 @@ class RawDataProcess(object):
m = self.process_vnstat(data)
if tool == 'sar' and data_type == 'cpu':
m = self.process_sar_cpu(data)
- if raw.has_key('cpu_num'):
+ if 'cpu_num' in raw:
m['cpu_num'] = raw['cpu_num']
- if raw.has_key('cpu_mhz'):
+ if 'cpu_mhz' in raw:
m['cpu_mhz'] = raw['cpu_mhz']
if tool == 'qperf':
m = self.process_qperf(data)
@@ -121,7 +124,10 @@ if __name__ == '__main__':
print p.process_vnstat(data)
cmd = "sar -u 2"
- child = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ child = subprocess.Popen(
+ cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
import time
import os
from signal import SIGINT
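The raw_data hunks above do two things: they extract numbers and their units from vnstat output with a pair of regexes, and they replace dict.has_key() with the in operator, which is the only form that still exists in Python 3. A rough, self-contained sketch of the positional number/unit pairing the parser presumably performs; the sample string and the zip-based pairing are illustrative, not taken from the actual process_vnstat() body:

    import re

    sample = "rx 12.5 MiB 3.1 kbit/s tx 4.2 GiB 1.8 Mbit/s"   # made-up input
    digits = re.compile(r"\d{1,}\.?\d*")
    units = re.compile(r"(?:gib|mib|kib|kbit/s|gbit/s|mbit/s|p/s)", re.IGNORECASE)

    # Pair each number with the unit that follows it.
    pairs = dict(enumerate(zip(digits.findall(sample), units.findall(sample))))

    # Python 3 removed dict.has_key(), so membership tests must use `in`.
    if 0 in pairs:
        print(pairs[0])   # ('12.5', 'MiB')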
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/__init__.py b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/__init__.py
index df7d24d0..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/__init__.py
@@ -6,4 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/configuration.py b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/configuration.py
index 6312efa2..5131e8d4 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/configuration.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/configuration.py
@@ -18,4 +18,4 @@ source_repo = {
"passwd": "root",
"ip": "192.168.188.10",
"user": "root"
-}
\ No newline at end of file
+}
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/model.py b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/model.py
index 4c38973e..b6e37ff5 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/model.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/model.py
@@ -28,4 +28,4 @@ class Test(unittest.TestCase):
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
- unittest.main()
\ No newline at end of file
+ unittest.main()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_cfg_intent_parse.py b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_cfg_intent_parse.py
index a08607bb..a0cf2a33 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_cfg_intent_parse.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_cfg_intent_parse.py
@@ -15,6 +15,7 @@ from vstf.controller.env_build.cfg_intent_parse import IntentParser
class Test(model.Test):
+
def setUp(self):
super(Test, self).setUp()
self.dir = os.path.dirname(__file__)
@@ -33,4 +34,4 @@ if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.INFO)
- unittest.main()
\ No newline at end of file
+ unittest.main()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_collect.py b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_collect.py
index 3e84d019..e8e9dd8c 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_collect.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_collect.py
@@ -15,31 +15,41 @@ from vstf.controller.unittest import model
class TestCollect(model.Test):
-
+
def setUp(self):
super(TestCollect, self).setUp()
self.obj = env_collect.EnvCollectApi(self.conn)
-
+
def test_collect_host_info(self):
- ret_str = json.dumps(self.obj.collect_host_info(self.tester_host), indent = 4)
- for key in ("CPU INFO","MEMORY INFO","HW_INFO","OS INFO"):
- self.assertTrue(key in ret_str, "collect_host_info failed, ret_str = %s" % ret_str)
-
+ ret_str = json.dumps(
+ self.obj.collect_host_info(
+ self.tester_host), indent=4)
+ for key in ("CPU INFO", "MEMORY INFO", "HW_INFO", "OS INFO"):
+ self.assertTrue(
+ key in ret_str,
+ "collect_host_info failed, ret_str = %s" %
+ ret_str)
+
def test_list_nic_devices(self):
- ret_str = json.dumps(self.obj.list_nic_devices(self.tester_host), indent = 4)
- for key in ("device","mac","bdf","desc"):
- self.assertTrue(key in ret_str, "list_nic_devices failed, ret_str = %s" % ret_str)
+ ret_str = json.dumps(
+ self.obj.list_nic_devices(
+ self.tester_host), indent=4)
+ for key in ("device", "mac", "bdf", "desc"):
+ self.assertTrue(
+ key in ret_str,
+ "list_nic_devices failed, ret_str = %s" %
+ ret_str)
print ret_str
-
+
def test_get_device_detail(self):
identity = "01:00.0"
ret = self.obj.get_device_detail(self.tester_host, "01:00.0")
- for key in ("device","mac","bdf","desc"):
+ for key in ("device", "mac", "bdf", "desc"):
self.assertTrue(key in ret)
self.assertTrue(ret['bdf'] == identity)
if __name__ == "__main__":
import logging
- logging.basicConfig(level = logging.INFO)
- unittest.main()
\ No newline at end of file
+ logging.basicConfig(level=logging.INFO)
+ unittest.main()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_driver_function.py b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_driver_function.py
index 8d45c7b3..a8b1b018 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_driver_function.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_driver_function.py
@@ -14,19 +14,22 @@ from vstf.controller.functiontest.driver.drivertest import config_setup
from vstf.controller.unittest import model
-class TestDriverFunction(model.Test):
+class TestDriverFunction(model.Test):
+
def setUp(self):
logging.info("start driver function test unit test.")
-
+
def test_config_setup(self):
- config ,_ = config_setup()
- for key in ("test_scene","bond_flag","switch_module"):
- self.assertTrue(key in config.keys(), "config_setup function failure.")
+ config, _ = config_setup()
+ for key in ("test_scene", "bond_flag", "switch_module"):
+ self.assertTrue(
+ key in config.keys(),
+ "config_setup function failure.")
def teardown(self):
logging.info("stop driver function test unit test.")
if __name__ == "__main__":
import logging
- logging.basicConfig(level = logging.INFO)
- unittest.main()
\ No newline at end of file
+ logging.basicConfig(level=logging.INFO)
+ unittest.main()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_env_build.py b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_env_build.py
index e4529e48..5f9d047f 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_env_build.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_env_build.py
@@ -15,45 +15,46 @@ from vstf.controller.env_build import env_build
class TestEnvBuilder(model.Test):
+
def setUp(self):
super(TestEnvBuilder, self).setUp()
self.dir = os.path.dirname(__file__)
-
+
@unittest.skip('for now')
def test_build_tn(self):
- filepath = os.path.join(self.dir,'../../../etc/vstf/env/Tn.json')
+ filepath = os.path.join(self.dir, '../../../etc/vstf/env/Tn.json')
self.mgr = env_build.EnvBuildApi(self.conn, filepath)
ret = self.mgr.build()
self.assertTrue(ret, "build_tn failed,ret = %s" % ret)
-
+
@unittest.skip('for now')
def test_build_tn1v(self):
- filepath = os.path.join(self.dir,'../../../etc/vstf/env/Tnv.json')
+ filepath = os.path.join(self.dir, '../../../etc/vstf/env/Tnv.json')
self.mgr = env_build.EnvBuildApi(self.conn, filepath)
ret = self.mgr.build()
self.assertTrue(ret, "build_tn1v failed,ret = %s" % ret)
-
+
@unittest.skip('for now')
def test_build_ti(self):
- filepath = os.path.join(self.dir,'../../../etc/vstf/env/Ti.json')
+ filepath = os.path.join(self.dir, '../../../etc/vstf/env/Ti.json')
self.mgr = env_build.EnvBuildApi(self.conn, filepath)
ret = self.mgr.build()
self.assertTrue(ret, "build_ti failed,ret = %s" % ret)
-
+
@unittest.skip('for now')
def test_build_tu(self):
- filepath = os.path.join(self.dir,'../../../etc/vstf/env/Tu.json')
+ filepath = os.path.join(self.dir, '../../../etc/vstf/env/Tu.json')
self.mgr = env_build.EnvBuildApi(self.conn, filepath)
ret = self.mgr.build()
self.assertTrue(ret, "build_tu failed,ret = %s" % ret)
-
+
def test_build_tu_bridge(self):
- filepath = os.path.join(self.dir,'../../../etc/vstf/env/Tu_br.json')
+ filepath = os.path.join(self.dir, '../../../etc/vstf/env/Tu_br.json')
self.mgr = env_build.EnvBuildApi(self.conn, filepath)
ret = self.mgr.build()
self.assertTrue(ret, "build_tu failed,ret = %s" % ret)
-
+
if __name__ == "__main__":
import logging
- logging.basicConfig(level = logging.INFO)
- unittest.main()
\ No newline at end of file
+ logging.basicConfig(level=logging.INFO)
+ unittest.main()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_perf.py b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_perf.py
index 0258ab65..4e2a2ea9 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_perf.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_perf.py
@@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)
class TestPerf(model.Test):
-
+
def setUp(self):
LOG.info("start performance unit test.")
super(TestPerf, self).setUp()
@@ -48,7 +48,10 @@ class TestPerf(model.Test):
tool_settings = ToolSettings(path=self.base_path)
tester_settings = TesterSettings(path=self.base_path)
flow_producer = FlowsProducer(self.conn, flows_settings)
- provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+ provider = PerfProvider(
+ flows_settings.settings,
+ tool_settings.settings,
+ tester_settings.settings)
perf = pf.Performance(self.conn, provider)
tests = perf_settings.settings
for scenario, cases in tests.items():
@@ -120,5 +123,8 @@ class TestPerf(model.Test):
if __name__ == "__main__":
- setup_logging(level=logging.INFO, log_file="/var/log/vstf/vstf-unit-test.log", clevel=logging.INFO)
- unittest.main()
\ No newline at end of file
+ setup_logging(
+ level=logging.INFO,
+ log_file="/var/log/vstf/vstf-unit-test.log",
+ clevel=logging.INFO)
+ unittest.main()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_ssh.py b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_ssh.py
index f2403551..c9aadb04 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_ssh.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_ssh.py
@@ -21,16 +21,14 @@ class Test(model.Test):
self.user = self.source_repo["user"]
self.passwd = self.source_repo["passwd"]
-
def tearDown(self):
super(Test, self).tearDown()
-
def test_run_cmd(self):
ssh.run_cmd(self.host, self.user, self.passwd, 'ls')
if __name__ == "__main__":
import logging
- logging.basicConfig(level = logging.INFO)
- unittest.main()
\ No newline at end of file
+ logging.basicConfig(level=logging.INFO)
+ unittest.main()
diff --git a/testsuites/vstf/vstf_scripts/vstf/controller/vstfadm.py b/testsuites/vstf/vstf_scripts/vstf/controller/vstfadm.py
index 86641e7d..1546b47c 100644
--- a/testsuites/vstf/vstf_scripts/vstf/controller/vstfadm.py
+++ b/testsuites/vstf/vstf_scripts/vstf/controller/vstfadm.py
@@ -36,7 +36,8 @@ def make_msg(method, **kwargs):
return {"method": method, "args": kwargs}
-@cliutil.arg("--host", dest="host", default="", action="store", help="list nic devices of specified host")
+@cliutil.arg("--host", dest="host", default="", action="store",
+ help="list nic devices of specified host")
def do_list_devs(args):
"""List the host's all netdev."""
ret = call(make_msg("list_devs", host=args.host))
@@ -51,19 +52,44 @@ def do_src_install(args):
"""work agent to pull source code and compile.
use git as underlying mechanism, please make sure the host has access to git repo.
"""
- ret = call(make_msg("src_install", host=args.host, config_file=args.config_file))
+ ret = call(
+ make_msg(
+ "src_install",
+ host=args.host,
+ config_file=args.config_file))
print_stdout(ret)
-@cliutil.arg("--host", dest="host", action="store", default=None,
- help="which host to build, must exists in your config file, use default[None] value to build all hosts.")
-@cliutil.arg("--model", dest="model", action="store", choices=('Tn', 'Ti', 'Tu', 'Tnv'),
- help="which model to build, if specified, the according config file /etc/vstf/env/{model}.json must exist.")
-@cliutil.arg("--config_file", dest="config_file", action="store", default=None,
- help="if specified, the config file will replace the default config file from /etc/vstf/env.")
+@cliutil.arg(
+ "--host",
+ dest="host",
+ action="store",
+ default=None,
+ help="which host to build, must exists in your config file, use default[None] value to build all hosts.")
+@cliutil.arg(
+ "--model",
+ dest="model",
+ action="store",
+ choices=(
+ 'Tn',
+ 'Ti',
+ 'Tu',
+ 'Tnv'),
+ help="which model to build, if specified, the according config file /etc/vstf/env/{model}.json must exist.")
+@cliutil.arg(
+ "--config_file",
+ dest="config_file",
+ action="store",
+ default=None,
+ help="if specified, the config file will replace the default config file from /etc/vstf/env.")
def do_apply_model(args):
"""Apply model to the host."""
- ret = call(make_msg("apply_model", host=args.host, model=args.model, config_file=args.config_file))
+ ret = call(
+ make_msg(
+ "apply_model",
+ host=args.host,
+ model=args.model,
+ config_file=args.config_file))
print_stdout(ret)
@@ -73,7 +99,11 @@ def do_apply_model(args):
help="configuration file for image creation.")
def do_create_images(args):
"""create images on host, images are configed by configuration file."""
- ret = call(make_msg("create_images", host=args.host, config_file=args.config_file))
+ ret = call(
+ make_msg(
+ "create_images",
+ host=args.host,
+ config_file=args.config_file))
print_stdout(ret)
@@ -83,23 +113,49 @@ def do_create_images(args):
help="configuration file for images.")
def do_clean_images(args):
"""clean images on host, images are configed by configuration file."""
- ret = call(make_msg("clean_images", host=args.host, config_file=args.config_file))
+ ret = call(
+ make_msg(
+ "clean_images",
+ host=args.host,
+ config_file=args.config_file))
print_stdout(ret)
-@cliutil.arg("--host", dest="host", action="store", default=None,
- help="which host to clean, must exists in your config file, use default[None] value to clean all hosts.")
-@cliutil.arg("--model", dest="model", action="store", choices=('Tn', 'Ti', 'Tu', 'Tnv'),
- help="if specified, the according config file /etc/vstf/env/{model}.json must exist.")
-@cliutil.arg("--config_file", dest="config_file", action="store", default=None,
- help="if specified, the config file will replace the default config file from /etc/vstf/env.")
+@cliutil.arg(
+ "--host",
+ dest="host",
+ action="store",
+ default=None,
+ help="which host to clean, must exists in your config file, use default[None] value to clean all hosts.")
+@cliutil.arg(
+ "--model",
+ dest="model",
+ action="store",
+ choices=(
+ 'Tn',
+ 'Ti',
+ 'Tu',
+ 'Tnv'),
+ help="if specified, the according config file /etc/vstf/env/{model}.json must exist.")
+@cliutil.arg(
+ "--config_file",
+ dest="config_file",
+ action="store",
+ default=None,
+ help="if specified, the config file will replace the default config file from /etc/vstf/env.")
def do_disapply_model(args):
"""Apply model to the host."""
- ret = call(make_msg("disapply_model", host=args.host, model=args.model, config_file=args.config_file))
+ ret = call(
+ make_msg(
+ "disapply_model",
+ host=args.host,
+ model=args.model,
+ config_file=args.config_file))
print_stdout(ret)
-@cliutil.arg("--host", dest="host", action="store", help="collect host information about cpu/mem etc")
+@cliutil.arg("--host", dest="host", action="store",
+ help="collect host information about cpu/mem etc")
def do_collect_host_info(args):
"""Show the host's CPU/MEN info"""
ret = call(make_msg("collect_host_info", target=args.host))
@@ -113,12 +169,22 @@ def do_show_tasks(args):
print_stdout(ret)
-@cliutil.arg("case", action="store", help="test case like Ti-1, Tn-1, Tnv-1, Tu-1, see case definition in documents")
+@cliutil.arg(
+ "case",
+ action="store",
+ help="test case like Ti-1, Tn-1, Tnv-1, Tu-1, see case definition in documents")
@cliutil.arg("tool", action="store", choices=cst.TOOLS)
@cliutil.arg("protocol", action="store", choices=cst.TPROTOCOLS)
@cliutil.arg("type", action="store", choices=cst.TTYPES)
-@cliutil.arg("sizes", action="store", default="64", help='test size list "64 128"')
-@cliutil.arg("--affctl", action="store_true", help="when affctl is True, it will do affctl before testing")
+@cliutil.arg(
+ "sizes",
+ action="store",
+ default="64",
+ help='test size list "64 128"')
+@cliutil.arg(
+ "--affctl",
+ action="store_true",
+ help="when affctl is True, it will do affctl before testing")
def do_perf_test(args):
"""Runs a quick single software performance test without envbuild and generating reports.
Outputs the result to the stdout immediately."""
@@ -270,7 +336,10 @@ def main():
args = parser.parse_args()
if args.func is None:
sys.exit(-1)
- setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-adm.log", clevel=logging.INFO)
+ setup_logging(
+ level=logging.DEBUG,
+ log_file="/var/log/vstf/vstf-adm.log",
+ clevel=logging.INFO)
# connect to manage
global CONN
try:
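The vstfadm changes above mostly rewrap stacked @cliutil.arg decorators, which register argparse-style argument specs on each do_* handler for a dispatcher to consume later. The cliutil internals are not part of this patch; the following is only a guess at the usual shape of such a helper (the arg() implementation, the sample handler body and the parse_args() input are all hypothetical):

    import argparse

    # Hypothetical decorator in the spirit of @cliutil.arg; not the vstf code.
    def arg(*args, **kwargs):
        def decorator(func):
            # Stash the spec on the function; decorators apply bottom-up,
            # so insert(0, ...) keeps the top-to-bottom declaration order.
            func.__dict__.setdefault('arguments', []).insert(0, (args, kwargs))
            return func
        return decorator

    @arg("--host", dest="host", default="", action="store",
         help="list nic devices of specified host")
    def do_list_devs(args):
        print("would list devices on %r" % args.host)

    # A dispatcher would normally do this once per sub-command.
    parser = argparse.ArgumentParser()
    for spec_args, spec_kwargs in do_list_devs.arguments:
        parser.add_argument(*spec_args, **spec_kwargs)
    do_list_devs(parser.parse_args(["--host", "tester"]))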
diff --git a/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/__init__.py b/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/__init__.py
index 547db686..83b8d15d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/__init__.py
+++ b/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/__init__.py
@@ -6,5 +6,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-
diff --git a/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_consumer.py b/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_consumer.py
index fb54e5db..049b86fa 100644
--- a/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_consumer.py
+++ b/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_consumer.py
@@ -57,7 +57,8 @@ class VstfConsumer(object):
self.srv = host
self.port = port
self.agent_id = agent_id
- self.url = 'amqp://' + self.user + ':' + self.passwd + '@' + self.srv + ':' + self.port + '/%2F'
+ self.url = 'amqp://' + self.user + ':' + self.passwd + \
+ '@' + self.srv + ':' + self.port + '/%2F'
# load the agent_funcs
try:
@@ -122,8 +123,10 @@ class VstfConsumer(object):
if self._closing:
self._connection.ioloop.stop()
else:
- LOGGER.warning('Connection closed, reopening in 2 seconds: (%s) %s',
- reply_code, reply_text)
+ LOGGER.warning(
+ 'Connection closed, reopening in 2 seconds: (%s) %s',
+ reply_code,
+ reply_text)
self._connection.add_timeout(2, self.reconnect)
def reconnect(self):
@@ -206,7 +209,10 @@ class VstfConsumer(object):
:param str|unicode exchange_name: The name of the exchange to declare
"""
- LOGGER.info('Declaring %s exchange %s', constant.DIRECT, constant.exchange_d)
+ LOGGER.info(
+ 'Declaring %s exchange %s',
+ constant.DIRECT,
+ constant.exchange_d)
self._channel.exchange_declare(self.on_direct_exchange_declareok,
constant.exchange_d,
constant.DIRECT)
@@ -342,14 +348,15 @@ class VstfConsumer(object):
'args': e.args}}
finally:
response = message.add_context(response, **head)
- LOGGER.debug("response the msg: head:%(h)s, body:%(b)s",
- {'h': response.get('head'), 'b': response.get('body')})
-
- respone_chanl.basic_publish(exchange=constant.exchange_d,
- routing_key=properties.reply_to,
- properties=pika.BasicProperties(correlation_id=properties.correlation_id),
- body=message.encode(response)
- )
+ LOGGER.debug("response the msg: head:%(h)s, body:%(b)s", {
+ 'h': response.get('head'), 'b': response.get('body')})
+
+ respone_chanl.basic_publish(
+ exchange=constant.exchange_d,
+ routing_key=properties.reply_to,
+ properties=pika.BasicProperties(
+ correlation_id=properties.correlation_id),
+ body=message.encode(response))
# no matter what happened, tell the mq-server to drop this msg.
self.acknowledge_message(basic_deliver.delivery_tag)
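The AMQP URL that rpc_consumer.py (and rpc_producer.py below) builds by string concatenation ends in '%2F', which is just the URL-encoded default vhost '/'. A sketch of an equivalent, arguably clearer construction; the function name is illustrative, the port assumes the RabbitMQ default, and the quoting only matters once credentials contain reserved characters:

    # Hedged alternative to the concatenated AMQP URL; same result for
    # the simple guest/guest credentials used in this code.
    try:
        from urllib import quote            # Python 2
    except ImportError:
        from urllib.parse import quote      # Python 3

    def build_amqp_url(user, passwd, host, port, vhost="/"):
        return "amqp://%s:%s@%s:%s/%s" % (
            quote(user, safe=""), quote(passwd, safe=""),
            host, port, quote(vhost, safe=""))

    print(build_amqp_url("guest", "guest", "192.168.188.10", "5672"))
    # -> amqp://guest:guest@192.168.188.10:5672/%2F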
diff --git a/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_producer.py b/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_producer.py
index abf2a7fc..cb72b45d 100644
--- a/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_producer.py
+++ b/testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_producer.py
@@ -22,6 +22,7 @@ LOG = logging.getLogger(__name__)
class RpcProxy(object):
+
def __init__(self, host,
user='guest',
passwd='guest',
@@ -39,7 +40,8 @@ class RpcProxy(object):
self.passwd = passwd
self.srv = host
self.port = port
- self.url = 'amqp://' + self.user + ':' + self.passwd + '@' + self.srv + ':' + self.port + '/%2F'
+ self.url = 'amqp://' + self.user + ':' + self.passwd + \
+ '@' + self.srv + ':' + self.port + '/%2F'
try:
self.connect(host, self.setup_vstf_producer)
except Exception as e:
@@ -51,13 +53,14 @@ class RpcProxy(object):
def connect(self, host, ok_callback):
"""Create a Blocking connection to the rabbitmq-server
-
+
:param str host: the rabbitmq-server's host
:param obj ok_callback: if connect success than do this function
-
+
"""
LOG.info("Connect to the server %s", host)
- self._connection = pika.BlockingConnection(pika.URLParameters(self.url))
+ self._connection = pika.BlockingConnection(
+ pika.URLParameters(self.url))
if self._connection:
ok_callback()
@@ -80,7 +83,9 @@ class RpcProxy(object):
LOG.info("Declare queue %s and bind it to exchange %s",
self._queue, constant.exchange_d)
self._channel.queue_declare(queue=self._queue, exclusive=True)
- self._channel.queue_bind(exchange=constant.exchange_d, queue=self._queue)
+ self._channel.queue_bind(
+ exchange=constant.exchange_d,
+ queue=self._queue)
def start_consumer(self):
LOG.info("Start response consumer")
@@ -121,8 +126,8 @@ class RpcProxy(object):
self.response = None
if self.corr_id == props.correlation_id:
self.response = json.loads(body)
- LOG.debug("Proxy producer reciver the msg: head:%(h)s, body:%(b)s",
- {'h': self.response.get('head'), 'b': self.response.get('body')})
+ LOG.debug("Proxy producer reciver the msg: head:%(h)s, body:%(b)s", {
+ 'h': self.response.get('head'), 'b': self.response.get('body')})
else:
LOG.warn("Proxy producer Drop the msg "
"because of the wrong correlation id, %s\n" % body)
@@ -130,8 +135,11 @@ class RpcProxy(object):
def publish(self, target, corrid, body):
properties = pika.BasicProperties(reply_to=self._queue,
correlation_id=corrid)
- LOG.debug("start to publish message to the exchange=%s, target=%s, msg=%s"
- , constant.exchange_d, target, body)
+ LOG.debug(
+ "start to publish message to the exchange=%s, target=%s, msg=%s",
+ constant.exchange_d,
+ target,
+ body)
return self._channel.basic_publish(exchange=constant.exchange_d,
routing_key=target,
mandatory=True,
@@ -149,7 +157,7 @@ class RpcProxy(object):
queue = constant.queue_common + target
# the msg request and respone must be match by corr_id
self.corr_id = str(uuid.uuid4())
- # same msg format
+ # same msg format
msg = message.add_context(msg, corrid=self.corr_id)
# send msg to the queue
@@ -182,7 +190,7 @@ class RpcProxy(object):
# deal with exceptions
if msg_body \
and isinstance(msg_body, dict) \
- and msg_body.has_key('exception'):
+ and 'exception' in msg_body:
ename = str(msg_body['exception'].get('name'))
if hasattr(exceptions, ename):
e = getattr(exceptions, ename)()
@@ -199,6 +207,7 @@ class RpcProxy(object):
class Server(object):
+
def __init__(self, host=None,
user='guest',
passwd='guest',
@@ -206,7 +215,8 @@ class Server(object):
super(Server, self).__init__()
# Default use salt's master ip as rabbit rpc server ip
if host is None:
- raise Exception("Can not create rpc proxy because of the None rabbitmq server address.")
+ raise Exception(
+ "Can not create rpc proxy because of the None rabbitmq server address.")
self.host = host
self.port = port
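RpcProxy and the consumer above implement the usual RPC-over-RabbitMQ pattern: publish a request carrying reply_to (an exclusive reply queue) and a fresh correlation_id, then accept only the response whose correlation_id matches. A compressed sketch of that flow with pika's blocking API, assuming a reachable broker and a server-side consumer on the target queue; the queue name and payload are illustrative, and the default exchange is used here for brevity where the vstf code publishes to its own direct exchange:

    import json
    import time
    import uuid

    import pika

    # Illustrative broker URL; see the URL construction notes above.
    conn = pika.BlockingConnection(
        pika.URLParameters("amqp://guest:guest@192.168.188.10:5672/%2F"))
    channel = conn.channel()

    # Exclusive, server-named queue that carries only our replies.
    reply_queue = channel.queue_declare(queue="", exclusive=True).method.queue

    corr_id = str(uuid.uuid4())
    channel.basic_publish(
        exchange="",                      # default exchange for brevity
        routing_key="agent-queue",        # hypothetical agent queue name
        properties=pika.BasicProperties(reply_to=reply_queue,
                                        correlation_id=corr_id),
        body=json.dumps({"method": "ping", "args": {}}))

    # Poll the reply queue and keep only the message matching our corr_id.
    while True:
        method, props, body = channel.basic_get(reply_queue)
        if method and props.correlation_id == corr_id:
            channel.basic_ack(method.delivery_tag)
            print(json.loads(body))
            break
        time.sleep(0.1)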