summaryrefslogtreecommitdiffstats
path: root/testsuites/posca
diff options
context:
space:
mode:
Diffstat (limited to 'testsuites/posca')
-rw-r--r--testsuites/posca/testcase_cfg/posca_factor_multistack_storage.yaml35
-rw-r--r--testsuites/posca/testcase_cfg/posca_factor_multistack_storage_parallel.yaml33
-rw-r--r--testsuites/posca/testcase_cfg/posca_factor_soak_throughputs.yaml35
-rw-r--r--testsuites/posca/testcase_cfg/posca_feature_moon_resources.yaml21
-rw-r--r--testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml40
-rw-r--r--testsuites/posca/testcase_cfg/posca_feature_vnf_scale_out.yaml (renamed from testsuites/posca/testcase_cfg/posca_factor_vnf_scale_out.yaml)25
-rw-r--r--testsuites/posca/testcase_dashboard/posca_feature_moon.py121
-rw-r--r--testsuites/posca/testcase_dashboard/posca_feature_moon_dashboard.json13
-rw-r--r--testsuites/posca/testcase_dashboard/posca_feature_moon_index_pattern.json4
-rw-r--r--testsuites/posca/testcase_dashboard/posca_feature_moon_resources_histogram.json11
-rw-r--r--testsuites/posca/testcase_dashboard/posca_feature_moon_tenants_discover.json23
-rw-r--r--testsuites/posca/testcase_dashboard/posca_feature_moon_tenants_histogram.json11
-rw-r--r--testsuites/posca/testcase_dashboard/posca_moon_resources.py36
-rw-r--r--testsuites/posca/testcase_dashboard/posca_moon_tenants.py36
-rw-r--r--testsuites/posca/testcase_dashboard/posca_stress_ping.py2
-rwxr-xr-xtestsuites/posca/testcase_dashboard/posca_vnf_scale_out.py35
-rwxr-xr-xtestsuites/posca/testcase_dashboard/system_bandwidth.py2
-rw-r--r--testsuites/posca/testcase_script/posca_factor_multistack_storage.py236
-rw-r--r--testsuites/posca/testcase_script/posca_factor_multistack_storage_parallel.py164
-rw-r--r--testsuites/posca/testcase_script/posca_factor_ping.py18
-rw-r--r--testsuites/posca/testcase_script/posca_factor_soak_throughputs.py192
-rw-r--r--testsuites/posca/testcase_script/posca_factor_system_bandwidth.py4
-rw-r--r--testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py125
-rw-r--r--testsuites/posca/testcase_script/posca_feature_moon_resources.py59
-rw-r--r--testsuites/posca/testcase_script/posca_feature_moon_tenants.py158
-rw-r--r--testsuites/posca/testcase_script/posca_feature_testpmd_scale_up.py5
-rw-r--r--testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py143
27 files changed, 1405 insertions, 182 deletions
diff --git a/testsuites/posca/testcase_cfg/posca_factor_multistack_storage.yaml b/testsuites/posca/testcase_cfg/posca_factor_multistack_storage.yaml
new file mode 100644
index 00000000..e2f48438
--- /dev/null
+++ b/testsuites/posca/testcase_cfg/posca_factor_multistack_storage.yaml
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+load_manager:
+ scenarios:
+ tool: fio
+ test_times: 10
+      # this "rw" value could be write, read, rw, rr or randrw
+ rw: 'randrw'
+ bs: '4k'
+ size: '20g'
+ rwmixwrite: '50'
+ num_thread: 1, 2
+ num_stack: 1
+ volume_num: '1'
+ num_jobs: '1'
+ direct: '1'
+ volume_size: 50
+
+ runners:
+ stack_create: yardstick
+ flavor:
+ yardstick_test_dir: "samples"
+ yardstick_testcase: "storage_bottlenecks"
+
+contexts:
+ dashboard: "Bottlenecks-ELK"
+ yardstick: "Bottlenecks-Yardstick"
+ yardstick_envpre: True
diff --git a/testsuites/posca/testcase_cfg/posca_factor_multistack_storage_parallel.yaml b/testsuites/posca/testcase_cfg/posca_factor_multistack_storage_parallel.yaml
new file mode 100644
index 00000000..b55b826c
--- /dev/null
+++ b/testsuites/posca/testcase_cfg/posca_factor_multistack_storage_parallel.yaml
@@ -0,0 +1,33 @@
+##############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+load_manager:
+ scenarios:
+ tool: fio
+ # for this option we provide " write, read, rw, rr"
+ rw: "randrw"
+ bs: "4k"
+ size: "20g"
+ rwmixwrite: "50"
+ num_stack: 1, 3
+ volume_num: "1"
+ num_jobs: "1"
+ direct: "1"
+ volume_size: 50
+
+ runners:
+ stack_create: yardstick
+ flavor:
+ yardstick_test_dir: "samples"
+ yardstick_testcase: "storage_bottlenecks"
+
+contexts:
+ dashboard: "Bottlenecks-ELK"
+ yardstick: "Bottlenecks-Yardstick"
+ yardstick_envpre: True
diff --git a/testsuites/posca/testcase_cfg/posca_factor_soak_throughputs.yaml b/testsuites/posca/testcase_cfg/posca_factor_soak_throughputs.yaml
new file mode 100644
index 00000000..983b7d76
--- /dev/null
+++ b/testsuites/posca/testcase_cfg/posca_factor_soak_throughputs.yaml
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Sample config file for life-cycle throughputs baseline test
+# Each vm pair will have its ttl (time to live) and creation delay
+# (lazy creation delay) specified.
+# Multiple context are used to specify the host and target VMs.
+
+load_manager:
+ scenarios:
+ tool: netperf
+ test_duration_hours: 0.1
+ vim_pair_ttl: 10
+ vim_pair_lazy_cre_delay: 120
+ package_size:
+ threshhold:
+ package_loss: 0%
+ latency: 300
+
+ runners:
+ stack_create: yardstick
+ flavor:
+ yardstick_test_dir: "samples"
+ yardstick_testcase: "netperf_soak"
+
+contexts:
+ dashboard: "Bottlenecks-ELK"
+ yardstick: "Bottlenecks-Yardstick"
+ yardstick_envpre: True
diff --git a/testsuites/posca/testcase_cfg/posca_feature_moon_resources.yaml b/testsuites/posca/testcase_cfg/posca_feature_moon_resources.yaml
index d6b325f7..bbf65ba7 100644
--- a/testsuites/posca/testcase_cfg/posca_feature_moon_resources.yaml
+++ b/testsuites/posca/testcase_cfg/posca_feature_moon_resources.yaml
@@ -11,20 +11,27 @@ load_manager:
scenarios:
tool: https request
# info that the cpus and mems have the same number of data.
+ pdp_name: pdp
+ policy_name: "MLS Policy example"
+ model_name: MLS
tenants: 1,5,10,20
+ subject_number: 10
+ object_number: 10
+ timeout: 0.2
runners:
stack_create: yardstick
Debug: False
yardstick_test_dir: "samples"
- yardstick_testcase: "bottlenecks_moon_resources"
-
- runner_exta:
- # info this section is for yardstick do some exta env prepare.
- installation_method: yardstick
- installation_type: testpmd
+ yardstick_testcase: "moon_resource"
contexts:
# info: if the dashboard has data, we will create the data dashboard.
dashboard: "Bottlenecks-ELK"
- yardstick: "Bottlenecks-yardstick" \ No newline at end of file
+ yardstick: "Bottlenecks-yardstick"
+ moon_monitoring: True
+ moon_environment:
+ ip: "192.168.37.205"
+ user: "root"
+ password: "root"
+ consul_port: 30005
diff --git a/testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml b/testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml
new file mode 100644
index 00000000..7feb6e4e
--- /dev/null
+++ b/testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml
@@ -0,0 +1,40 @@
+##############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+load_manager:
+ scenarios:
+ tool: https request
+    # info that the cpus and mems have the same number of data.
+ pdp_name: pdp
+ policy_name: "MLS Policy example"
+ model_name: MLS
+ subject_number: 20
+ object_number: 20
+ timeout: 0.003
+ initial_tenants: 0
+ steps_tenants: 1
+ tolerate_time: 20
+ SLA: 5
+
+ runners:
+ stack_create: yardstick
+ Debug: False
+ yardstick_test_dir: "samples"
+ yardstick_testcase: "moon_tenant"
+
+contexts:
+  # info: if the dashboard has data, we will create the data dashboard.
+ dashboard: "Bottlenecks-ELK"
+ yardstick: "Bottlenecks-yardstick"
+ moon_monitoring: True
+ moon_environment:
+ ip: "192.168.37.205"
+ user: "root"
+ password: "root"
+ consul_port: 30005
diff --git a/testsuites/posca/testcase_cfg/posca_factor_vnf_scale_out.yaml b/testsuites/posca/testcase_cfg/posca_feature_vnf_scale_out.yaml
index 84bde99d..d893ac8a 100644
--- a/testsuites/posca/testcase_cfg/posca_factor_vnf_scale_out.yaml
+++ b/testsuites/posca/testcase_cfg/posca_feature_vnf_scale_out.yaml
@@ -7,12 +7,19 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-test_config:
- num_vnfs: [1, 40]
-runner_config:
- dashboard: "y"
- dashboard_ip:
- stack_create: yardstick
- yardstick_test_ip:
- yardstick_test_dir: "samples/vnf_samples/nsut/acl"
- yardstick_testcase: "tc_heat_rfc2544_ipv4_1rule_1flow_64B_packetsize_scale_out.yaml"
+load_manager:
+ scenarios:
+ number_vnfs: 1, 2, 4
+ iterations: 10
+ interval: 35
+
+ runners:
+ stack_create: yardstick
+ flavor:
+ yardstick_test_dir: "samples/vnf_samples/nsut/acl"
+ yardstick_testcase: "tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_correlated_traffic_scale_out"
+
+contexts:
+ dashboard: "Bottlenecks-ELK"
+ yardstick: "Bottlenecks_yardstick"
+ yardstick_envpre: False
diff --git a/testsuites/posca/testcase_dashboard/posca_feature_moon.py b/testsuites/posca/testcase_dashboard/posca_feature_moon.py
new file mode 100644
index 00000000..6819ea84
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_feature_moon.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realizes a function of creating dashboards for the moon feature test'''
+import ConfigParser
+from elasticsearch import Elasticsearch
+import json
+import os
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+
+LOG = log.Logger(__name__).getLogger()
+config = ConfigParser.ConfigParser()
+es = Elasticsearch()
+dashboard_path = os.path.join(conf_parser.test_dir,
+ "posca",
+ "testcase_dashboard")
+dashboard_dir = dashboard_path + "/"
+
+
+def dashboard_send_data(runner_config, test_data):
+ global es
+ print runner_config
+ es_ip = runner_config['dashboard_ip'].split(':')
+ es = Elasticsearch([{'host': es_ip[0]}])
+ print test_data["test_body"]
+ res = es.index(index="bottlenecks",
+ doc_type=test_data["testcase"],
+ body=test_data["test_body"][0])
+ if res['created'] == "False":
+ LOG.error("date send to kibana have errors ", test_data["data_body"])
+
+
+def posca_moon_init(runner_config):
+ global es
+ es_ip = runner_config['dashboard_ip'].split(':')
+ es = Elasticsearch([{'host': es_ip[0]}])
+ # Create bottlenecks index
+ with open(dashboard_dir + 'posca_feature_moon_index_pattern.json')\
+ as index_pattern:
+ doc = json.load(index_pattern)
+ res = es.index(
+ index=".kibana",
+ doc_type="index-pattern",
+ id="bottlenecks",
+ body=doc)
+ if res['created'] == "True":
+ LOG.info("bottlenecks index-pattern has created")
+ else:
+ LOG.info("bottlenecks index-pattern has existed")
+
+ with open(dashboard_dir + 'posca_system_bandwidth_config.json')\
+ as index_config:
+ doc = json.load(index_config)
+ res = es.index(index=".kibana", doc_type="config", id="4.6.1", body=doc)
+ if res['created'] == "True":
+ LOG.info("bottlenecks config has created")
+ else:
+ LOG.info("bottlenecks config has existed")
+
+ # Configure discover panel
+ with open(dashboard_dir + 'posca_feature_moon_discover.json')\
+ as index_discover:
+ doc = json.load(index_discover)
+ res = es.index(
+ index=".kibana",
+ doc_type="search",
+ id="moon",
+ body=doc)
+ if res['created'] == "True":
+ LOG.info("moon testcase search has created")
+ else:
+ LOG.info("moon testcase search has existed")
+
+ # Create testing data in line graph
+ with open(dashboard_dir + 'posca_feature_moon_resources_histogram.json')\
+ as line_data:
+ doc = json.load(line_data)
+ res = es.index(
+ index=".kibana",
+ doc_type="visualization",
+ id="resources",
+ body=doc)
+ if res['created'] == "True":
+ LOG.info("moon resources visualization has created")
+ else:
+ LOG.info("moon resources visualization has existed")
+
+ # Create comparison results in line chart
+ with open(dashboard_dir + 'posca_feature_moon_tenants_histogram.json')\
+ as line_char:
+ doc = json.load(line_char)
+ res = es.index(
+ index=".kibana",
+ doc_type="visualization",
+ id="tenants",
+ body=doc)
+ if res['created'] == "True":
+ LOG.info("moon tenants visualization has created")
+ else:
+ LOG.info("moon tenants visualization has existed")
+
+ # Create dashboard
+ with open(dashboard_dir + 'posca_feature_moon_dashboard.json')\
+ as dashboard:
+ doc = json.load(dashboard)
+ res = es.index(
+ index=".kibana",
+ doc_type="dashboard",
+ id="moon",
+ body=doc)
+ if res['created'] == "True":
+ LOG.info("moon testcases dashboard has created")
+ else:
+ LOG.info("moon testcases dashboard has existed")
diff --git a/testsuites/posca/testcase_dashboard/posca_feature_moon_dashboard.json b/testsuites/posca/testcase_dashboard/posca_feature_moon_dashboard.json
new file mode 100644
index 00000000..53a4a750
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_feature_moon_dashboard.json
@@ -0,0 +1,13 @@
+{
+ "title": "moon",
+ "hits": 0,
+ "description": "",
+ "panelsJSON": "[{\"id\":\"resources\",\"type\":\"visualization\",\"panelIndex\":1,\"size_x\":8,\"size_y\":7,\"col\":1,\"row\":1},{\"id\":\"tenants\",\"type\":\"visualization\",\"panelIndex\":2,\"size_x\":3,\"size_y\":7,\"col\":9,\"row\":1}]",
+ "optionsJSON": "{\"darkTheme\":false}",
+ "uiStateJSON": "{}",
+ "version": 1,
+ "timeRestore": false,
+ "kibanaSavedObjectMeta": {
+ "searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}}}]}"
+ }
+} \ No newline at end of file
diff --git a/testsuites/posca/testcase_dashboard/posca_feature_moon_index_pattern.json b/testsuites/posca/testcase_dashboard/posca_feature_moon_index_pattern.json
new file mode 100644
index 00000000..2bff871a
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_feature_moon_index_pattern.json
@@ -0,0 +1,4 @@
+{
+ "title": "bottlenecks",
+ "fields": "[{\"name\":\"_index\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"indexed\":false,\"analyzed\":false,\"doc_values\":false},{\"name\":\"tenant_max\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"indexed\":true,\"analyzed\":false,\"doc_values\":true},{\"name\":\"_source\",\"type\":\"_source\",\"count\":0,\"scripted\":false,\"indexed\":false,\"analyzed\":false,\"doc_values\":false},{\"name\":\"max_user\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"indexed\":true,\"analyzed\":false,\"doc_values\":true},{\"name\":\"tenant_number\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"indexed\":true,\"analyzed\":false,\"doc_values\":true},{\"name\":\"_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"indexed\":false,\"analyzed\":false,\"doc_values\":false},{\"name\":\"_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"indexed\":false,\"analyzed\":false,\"doc_values\":false},{\"name\":\"_score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"indexed\":false,\"analyzed\":false,\"doc_values\":false}]"
+} \ No newline at end of file
diff --git a/testsuites/posca/testcase_dashboard/posca_feature_moon_resources_histogram.json b/testsuites/posca/testcase_dashboard/posca_feature_moon_resources_histogram.json
new file mode 100644
index 00000000..c8977a72
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_feature_moon_resources_histogram.json
@@ -0,0 +1,11 @@
+{
+ "title": "resources",
+ "visState": "{\"title\":\"New Visualization\",\"type\":\"histogram\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"scale\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"type\":\"sum\",\"schema\":\"metric\",\"params\":{\"field\":\"max_user\"}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"tenant_number\",\"size\":5,\"order\":\"asc\",\"orderBy\":\"_term\"}}],\"listeners\":{}}",
+ "uiStateJSON": "{}",
+ "description": "",
+ "savedSearchId": "moon",
+ "version": 1,
+ "kibanaSavedObjectMeta": {
+ "searchSourceJSON": "{\"filter\":[]}"
+ }
+} \ No newline at end of file
diff --git a/testsuites/posca/testcase_dashboard/posca_feature_moon_tenants_discover.json b/testsuites/posca/testcase_dashboard/posca_feature_moon_tenants_discover.json
new file mode 100644
index 00000000..03360f89
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_feature_moon_tenants_discover.json
@@ -0,0 +1,23 @@
+{
+ "_index": ".kibana",
+ "_type": "search",
+ "_id": "tenants",
+ "_version": 1,
+ "found": true,
+ "_source": {
+ "title": "tenants",
+ "description": "",
+ "hits": 0,
+ "columns": [
+ "_source"
+ ],
+ "sort": [
+ "_score",
+ "desc"
+ ],
+ "version": 1,
+ "kibanaSavedObjectMeta": {
+ "searchSourceJSON": "{\"index\":\"bottlenecks\",\"filter\":[],\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}},\"require_field_match\":false,\"fragment_size\":2147483647},\"query\":{\"query_string\":{\"query\":\"_type:posca_factor_moon_tenants\",\"analyze_wildcard\":true}}}"
+ }
+ }
+} \ No newline at end of file
diff --git a/testsuites/posca/testcase_dashboard/posca_feature_moon_tenants_histogram.json b/testsuites/posca/testcase_dashboard/posca_feature_moon_tenants_histogram.json
new file mode 100644
index 00000000..a731acfc
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_feature_moon_tenants_histogram.json
@@ -0,0 +1,11 @@
+{
+ "title": "tenants",
+ "visState": "{\"title\":\"New Visualization\",\"type\":\"histogram\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"scale\":\"linear\",\"mode\":\"stacked\",\"times\":[],\"addTimeMarker\":false,\"defaultYExtents\":false,\"setYExtents\":false,\"yAxis\":{}},\"aggs\":[{\"id\":\"1\",\"type\":\"sum\",\"schema\":\"metric\",\"params\":{\"field\":\"tenant_max\"}}],\"listeners\":{}}",
+ "uiStateJSON": "{}",
+ "description": "",
+ "savedSearchId": "moon",
+ "version": 1,
+ "kibanaSavedObjectMeta": {
+ "searchSourceJSON": "{\"filter\":[]}"
+ }
+} \ No newline at end of file
diff --git a/testsuites/posca/testcase_dashboard/posca_moon_resources.py b/testsuites/posca/testcase_dashboard/posca_moon_resources.py
new file mode 100644
index 00000000..446faccc
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_moon_resources.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realizes a function of creating a dashboard for the moon resources test'''
+import ConfigParser
+from elasticsearch import Elasticsearch
+import os
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+
+LOG = log.Logger(__name__).getLogger()
+config = ConfigParser.ConfigParser()
+es = Elasticsearch()
+dashboard_path = os.path.join(conf_parser.test_dir,
+ "posca",
+ "testcase_dashboard")
+dashboard_dir = dashboard_path + "/"
+
+
+def dashboard_send_data(runner_config, test_data):
+ global es
+ print runner_config
+ es_ip = runner_config['dashboard_ip'].split(':')
+ es = Elasticsearch([{'host': es_ip[0]}])
+ print test_data["test_body"]
+ res = es.index(index="bottlenecks",
+ doc_type=test_data["testcase"],
+ body=test_data["test_body"][0])
+ if res['created'] == "False":
+ LOG.error("date send to kibana have errors ", test_data["data_body"])
diff --git a/testsuites/posca/testcase_dashboard/posca_moon_tenants.py b/testsuites/posca/testcase_dashboard/posca_moon_tenants.py
new file mode 100644
index 00000000..446faccc
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_moon_tenants.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realizes a function of creating a dashboard for the moon tenants test'''
+import ConfigParser
+from elasticsearch import Elasticsearch
+import os
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+
+LOG = log.Logger(__name__).getLogger()
+config = ConfigParser.ConfigParser()
+es = Elasticsearch()
+dashboard_path = os.path.join(conf_parser.test_dir,
+ "posca",
+ "testcase_dashboard")
+dashboard_dir = dashboard_path + "/"
+
+
+def dashboard_send_data(runner_config, test_data):
+ global es
+ print runner_config
+ es_ip = runner_config['dashboard_ip'].split(':')
+ es = Elasticsearch([{'host': es_ip[0]}])
+ print test_data["test_body"]
+ res = es.index(index="bottlenecks",
+ doc_type=test_data["testcase"],
+ body=test_data["test_body"][0])
+ if res['created'] == "False":
+ LOG.error("date send to kibana have errors ", test_data["data_body"])
diff --git a/testsuites/posca/testcase_dashboard/posca_stress_ping.py b/testsuites/posca/testcase_dashboard/posca_stress_ping.py
index 7a5a8fb8..64ce3835 100644
--- a/testsuites/posca/testcase_dashboard/posca_stress_ping.py
+++ b/testsuites/posca/testcase_dashboard/posca_stress_ping.py
@@ -32,7 +32,7 @@ def dashboard_send_data(runner_config, test_data):
doc_type=test_data["testcase"],
body=test_data["data_body"])
if res['created'] == "False":
- LOG.error("date send to kibana have errors ", test_data["data_body"])
+ LOG.error("date send to kibana have errors %s", test_data["data_body"])
def posca_stress_ping(runner_config):
diff --git a/testsuites/posca/testcase_dashboard/posca_vnf_scale_out.py b/testsuites/posca/testcase_dashboard/posca_vnf_scale_out.py
new file mode 100755
index 00000000..6720b7f0
--- /dev/null
+++ b/testsuites/posca/testcase_dashboard/posca_vnf_scale_out.py
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import ConfigParser
+from elasticsearch import Elasticsearch
+import os
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+
+LOG = log.Logger(__name__).getLogger()
+config = ConfigParser.ConfigParser()
+es = Elasticsearch()
+dashboard_path = os.path.join(conf_parser.test_dir,
+ "posca",
+ "testcase_dashboard")
+dashboard_dir = dashboard_path + "/"
+
+
+def dashboard_send_data(runner_config, test_data):
+ global es
+ # es_ip = runner_config['dashboard_ip'].split(':')
+ es = Elasticsearch([{'host': "172.17.0.5"}])
+ for i in test_data:
+ res = es.index(index="bottlenecks",
+ doc_type="vnf_scale_out",
+ body=i)
+ if res['created'] == "False":
+ LOG.error("date send to kibana have errors %s",
+ test_data["data_body"])
diff --git a/testsuites/posca/testcase_dashboard/system_bandwidth.py b/testsuites/posca/testcase_dashboard/system_bandwidth.py
index 4501dee7..5479b670 100755
--- a/testsuites/posca/testcase_dashboard/system_bandwidth.py
+++ b/testsuites/posca/testcase_dashboard/system_bandwidth.py
@@ -31,7 +31,7 @@ def dashboard_send_data(runner_config, test_data):
doc_type=test_data["testcase"],
body=test_data["data_body"])
if res['created'] == "False":
- LOG.error("date send to kibana have errors ", test_data["data_body"])
+ LOG.error("date send to kibana have errors %s", test_data["data_body"])
def dashboard_system_bandwidth(runner_config):
diff --git a/testsuites/posca/testcase_script/posca_factor_multistack_storage.py b/testsuites/posca/testcase_script/posca_factor_multistack_storage.py
new file mode 100644
index 00000000..34ee225c
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_factor_multistack_storage.py
@@ -0,0 +1,236 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realizes the function of running the posca multistack storage stress test.
+This file contains several parts:
+the first is a script that runs the test in several threads.'''
+
+import utils.logger as log
+import uuid
+import json
+import os
+import time
+import threading
+import datetime
+import Queue
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.quota_prepare as quota_prepare
+import utils.env_prepare.stack_prepare as stack_prepare
+import utils.infra_setup.runner.yardstick as yardstick_task
+
+import utils.infra_setup.runner.docker_env as docker_env
+
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+test_dict = {
+ "action": "runTestCase",
+ "args": {
+ "opts": {
+ "task-args": {}
+ },
+ "testcase": "multistack_storage_bottlenecks"
+ }
+}
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+cidr = "/home/opnfv/repos/yardstick/samples/storage_bottlenecks.yaml"
+runner_DEBUG = True
+q = Queue.Queue()
+final_result = Queue.Queue()
+
+
+def env_pre(test_config):
+ test_yardstick = False
+ if "yardstick" in test_config["contexts"].keys():
+ test_yardstick = True
+ stack_prepare._prepare_env_daemon(test_yardstick)
+ quota_prepare.quota_env_prepare()
+ LOG.info("yardstick environment prepare!")
+ if(test_config["contexts"]['yardstick_envpre']):
+ stdout = yardstick_task.yardstick_image_prepare()
+ LOG.debug(stdout)
+
+
+def testcase_parser(out_file="yardstick.out", **parameter_info):
+ cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+ cidr=cidr,
+ outfile=out_file,
+ parameter=parameter_info)
+ return cmd
+
+
+def do_test(test_config):
+ out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+ yardstick_container = docker_env.yardstick_info['container']
+ cmd = testcase_parser(out_file=out_file, **test_config)
+ print(cmd)
+ stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+ LOG.info(stdout)
+ loop_value = 0
+ while loop_value < 60:
+ time.sleep(2)
+ loop_value = loop_value + 1
+ with open(out_file) as f:
+ data = json.load(f)
+ if data["result"]["criteria"] == "PASS":
+ LOG.info("yardstick run success")
+ LOG.info("%s" % data["result"]["testcases"])
+ break
+ else:
+ LOG.error("yardstick error exit")
+ break
+
+ save_data = final_config_to_result(test_config, data)
+ return save_data
+
+
+def config_to_result(num, out_num, during_date):
+ testdata = {}
+ test_result = {}
+ final_data = {}
+
+ final_data["read_iops"] = 0
+ final_data["read_bw"] = 0
+ final_data["read_lat"] = 0
+ final_data["write_iops"] = 0
+ final_data["write_bw"] = 0
+ final_data["write_lat"] = 0
+
+ test_result["number_of_stacks"] = float(num)
+ test_result["success_times"] = out_num
+ test_result["success_rate"] = out_num / num
+ test_result["duration_time"] = during_date
+ testdata["data_body"] = test_result
+ testdata["testcase"] = testcase
+
+ while not final_result.empty():
+ data = final_result.get()
+ final_data["read_iops"] += data["read_iops"]
+ final_data["read_bw"] += data["read_bw"]
+ if final_data["read_lat"] is 0:
+ final_data["read_lat"] = data["read_lat"]
+ else:
+ final_data["read_lat"] += data["read_lat"]
+ final_data["read_lat"] = final_data["read_lat"]/2
+ final_data["write_iops"] += data["write_iops"]
+ final_data["write_bw"] += data["read_iops"]
+ if final_data["write_lat"] is 0:
+ final_data["write_lat"] = data["write_lat"]
+ else:
+ final_data["write_lat"] += data["write_lat"]
+ final_data["write_lat"] = final_data["write_lat"]/2
+
+ testdata["test_value"] = final_data
+ LOG.info("Final testdata is %s" % testdata)
+ return testdata
+
+
+def final_config_to_result(test_config, test_result):
+ out_data = test_result["result"]["testcases"]
+ test_data = out_data["storage_bottlenecks"]["tc_data"]
+ testdata = {}
+ testdata["read_iops"] = 0
+ testdata["read_bw"] = 0
+ testdata["read_lat"] = 0
+ testdata["write_iops"] = 0
+ testdata["write_bw"] = 0
+ testdata["write_lat"] = 0
+ print(testdata["read_iops"])
+ for result in test_data:
+ testdata["read_iops"] += result["data"]["read_iops"]
+ testdata["read_bw"] += result["data"]["read_bw"]
+ if testdata["read_lat"] is 0:
+ testdata["read_lat"] = result["data"]["read_lat"]
+ else:
+ testdata["read_lat"] += result["data"]["read_lat"]
+ testdata["read_lat"] = testdata["read_lat"]/2
+ testdata["write_iops"] += result["data"]["write_iops"]
+ testdata["write_bw"] += result["data"]["write_bw"]
+ if testdata["write_lat"] is 0:
+ testdata["write_lat"] = result["data"]["write_lat"]
+ else:
+ testdata["write_lat"] += result["data"]["write_lat"]
+ testdata["write_lat"] = testdata["write_lat"]/2
+ final_result.put(testdata)
+ q.put(1)
+ return testdata
+
+
+def func_run(con_dic):
+ test_date = do_test(con_dic)
+ return test_date
+
+
+def run(test_config):
+ con_dic = test_config["load_manager"]
+ scenarios_conf = con_dic["scenarios"]
+
+ if test_config["contexts"]["yardstick_ip"] is None:
+ con_dic["contexts"]["yardstick_ip"] =\
+ conf_parser.ip_parser("yardstick_test_ip")
+
+ env_pre(test_config)
+ LOG.info("yardstick environment prepare done!")
+
+ stack_num = scenarios_conf["num_stack"]
+ test_num = conf_parser.str_to_list(scenarios_conf["num_thread"])
+ rw = scenarios_conf["rw"]
+ bs = scenarios_conf["bs"]
+ size = scenarios_conf["size"]
+ rwmixwrite = scenarios_conf["rwmixwrite"]
+ numjobs = scenarios_conf["num_jobs"]
+ direct = scenarios_conf["direct"]
+ volume_num = scenarios_conf["volume_num"]
+ volume_size = scenarios_conf["volume_size"]
+
+ for value in test_num:
+ result = []
+ out_num = 0
+ num = int(value)
+ # pool = multiprocessing.Pool(processes=num)
+ threadings = []
+ LOG.info("begin to run %s thread" % num)
+
+ starttime = datetime.datetime.now()
+
+ for i in xrange(0, num):
+ case_config = {"stack_num": int(stack_num),
+ "volume_num": volume_num,
+ "rw": rw,
+ "bs": bs,
+ "size": size,
+ "rwmixwrite": rwmixwrite,
+ "numjobs": numjobs,
+ "direct": direct,
+ "volume_size": int(volume_size)}
+ tmp_thread = threading.Thread(target=func_run, args=(case_config,))
+ threadings.append(tmp_thread)
+ tmp_thread.start()
+
+ for one_thread in threadings:
+ one_thread.join()
+ while not q.empty():
+ result.append(q.get())
+ for item in result:
+ out_num = out_num + float(item)
+
+ print(result)
+
+ endtime = datetime.datetime.now()
+ LOG.info("%s thread success %d times" % (num, out_num))
+ during_date = (endtime - starttime).seconds
+
+ data_reply = config_to_result(num, out_num, during_date)
+ conf_parser.result_to_file(data_reply, test_config["out_file"])
+
+ LOG.info('END POSCA stress multistack storage test')
+ return data_reply
diff --git a/testsuites/posca/testcase_script/posca_factor_multistack_storage_parallel.py b/testsuites/posca/testcase_script/posca_factor_multistack_storage_parallel.py
new file mode 100644
index 00000000..8c623d41
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_factor_multistack_storage_parallel.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realizes the function of running the posca multistack
+storage parallel stress test. It launches one yardstick task that
+creates and exercises several storage stacks in parallel.'''
+
+import utils.logger as log
+import uuid
+import json
+import os
+import time
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.quota_prepare as quota_prepare
+import utils.env_prepare.stack_prepare as stack_prepare
+import utils.infra_setup.runner.yardstick as yardstick_task
+
+import utils.infra_setup.runner.docker_env as docker_env
+
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+test_dict = {
+ "action": "runTestCase",
+ "args": {
+ "opts": {
+ "task-args": {}
+ },
+ "testcase": "multistack_storage_bottlenecks_parallel"
+ }
+}
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+cidr = "/home/opnfv/repos/yardstick/samples/storage_bottlenecks.yaml"
+runner_DEBUG = True
+
+
+def env_pre(test_config):
+ test_yardstick = False
+ if "yardstick" in test_config["contexts"].keys():
+ test_yardstick = True
+ stack_prepare._prepare_env_daemon(test_yardstick)
+ quota_prepare.quota_env_prepare()
+ if(test_config["contexts"]['yardstick_envpre']):
+ LOG.info("yardstick environment prepare!")
+ stdout = yardstick_task.yardstick_image_prepare()
+ LOG.debug(stdout)
+
+
+def testcase_parser(out_file="yardstick.out", **parameter_info):
+ cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+ cidr=cidr,
+ outfile=out_file,
+ parameter=parameter_info)
+ return cmd
+
+
+def do_test(test_config):
+ out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+ yardstick_container = docker_env.yardstick_info['container']
+ cmd = testcase_parser(out_file=out_file, **test_config)
+ print(cmd)
+ stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+ LOG.info(stdout)
+ loop_value = 0
+ while loop_value < 60:
+ time.sleep(2)
+ loop_value = loop_value + 1
+ with open(out_file) as f:
+ data = json.load(f)
+ if data["result"]["criteria"] == "PASS":
+ LOG.info("yardstick run success")
+ LOG.info("%s" % data["result"]["testcases"])
+ break
+ else:
+ LOG.error("yardstick error exit")
+ break
+
+ save_data = config_to_result(test_config, data)
+ LOG.info(save_data)
+ return save_data
+
+
+def config_to_result(test_config, test_result):
+ print(test_result)
+ out_data = test_result["result"]["testcases"]
+ test_data = out_data["storage_bottlenecks"]["tc_data"]
+ testdata = {}
+ testdata["read_iops"] = 0
+ testdata["read_bw"] = 0
+ testdata["read_lat"] = 0
+ testdata["write_iops"] = 0
+ testdata["write_bw"] = 0
+ testdata["write_lat"] = 0
+ print(testdata["read_iops"])
+ for result in test_data:
+ testdata["read_iops"] += result["data"]["read_iops"]
+ testdata["read_bw"] += result["data"]["read_bw"]
+ if testdata["read_lat"] is 0:
+ testdata["read_lat"] = result["data"]["read_lat"]
+ else:
+ testdata["read_lat"] += result["data"]["read_lat"]
+ testdata["read_lat"] = testdata["read_lat"]/2
+ testdata["write_iops"] += result["data"]["write_iops"]
+ testdata["write_bw"] += result["data"]["write_bw"]
+ if testdata["write_lat"] is 0:
+ testdata["write_lat"] = result["data"]["write_lat"]
+ else:
+ testdata["write_lat"] += result["data"]["write_lat"]
+ testdata["write_lat"] = testdata["write_lat"]/2
+ return testdata
+
+
+def run(test_config):
+ con_dic = test_config["load_manager"]
+ scenarios_conf = con_dic["scenarios"]
+
+ if test_config["contexts"]["yardstick_ip"] is None:
+ con_dic["contexts"]["yardstick_ip"] =\
+ conf_parser.ip_parser("yardstick_test_ip")
+
+ env_pre(test_config)
+ LOG.info("yardstick environment prepare done!")
+
+ test_num = conf_parser.str_to_list(scenarios_conf["num_stack"])
+ rw = scenarios_conf["rw"]
+ bs = scenarios_conf["bs"]
+ size = scenarios_conf["size"]
+ rwmixwrite = scenarios_conf["rwmixwrite"]
+ numjobs = scenarios_conf["num_jobs"]
+ direct = scenarios_conf["direct"]
+ volume_num = scenarios_conf["volume_num"]
+ volume_size = scenarios_conf["volume_size"]
+
+ result = []
+
+ for value in test_num:
+ case_config = {"stack_num": int(value),
+ "volume_num": volume_num,
+ "rw": rw,
+ "bs": bs,
+ "size": size,
+ "rwmixwrite": rwmixwrite,
+ "numjobs": numjobs,
+ "direct": direct,
+ "volume_size": int(volume_size)}
+ data_reply = do_test(case_config)
+ result.append(data_reply)
+
+ LOG.info("%s stack successful run" % (value))
+
+ conf_parser.result_to_file(data_reply, test_config["out_file"])
+
+ LOG.info('END POSCA stress multistack storage parallel testcase')
+ LOG.info("The result data is %s", result)
+ return result
diff --git a/testsuites/posca/testcase_script/posca_factor_ping.py b/testsuites/posca/testcase_script/posca_factor_ping.py
index 3a2277cf..4ee43964 100644
--- a/testsuites/posca/testcase_script/posca_factor_ping.py
+++ b/testsuites/posca/testcase_script/posca_factor_ping.py
@@ -23,6 +23,7 @@ import Queue
from utils.parser import Parser as conf_parser
import utils.env_prepare.quota_prepare as quota_prepare
import utils.env_prepare.stack_prepare as stack_prepare
+import utils.infra_setup.runner.yardstick as runner_yardstick
import testsuites.posca.testcase_dashboard.posca_stress_ping as DashBoard
import utils.infra_setup.runner.docker_env as docker_env
@@ -42,6 +43,8 @@ test_dict = {
}
testfile = os.path.basename(__file__)
testcase, file_format = os.path.splitext(testfile)
+cidr = "/home/opnfv/repos/yardstick/samples/ping_bottlenecks.yaml"
+runner_DEBUG = True
q = Queue.Queue()
@@ -52,20 +55,21 @@ def env_pre(test_config):
test_yardstick = True
stack_prepare._prepare_env_daemon(test_yardstick)
quota_prepare.quota_env_prepare()
- cmd = ('yardstick env prepare')
LOG.info("yardstick environment prepare!")
if(test_config["contexts"]['yardstick_envpre']):
- yardstick_container = docker_env.yardstick_info['container']
- stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+ stdout = runner_yardstick.yardstick_image_prepare()
LOG.debug(stdout)
def do_test():
func_name = sys._getframe().f_code.co_name
out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+ parameter_info = {}
yardstick_container = docker_env.yardstick_info['container']
- cmd = ('yardstick task start /home/opnfv/repos/yardstick/'
- 'samples/ping_bottlenecks.yaml --output-file ' + out_file)
+ cmd = runner_yardstick.yardstick_command_parser(debug=runner_DEBUG,
+ cidr=cidr,
+ outfile=out_file,
+ parameter=parameter_info)
stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
LOG.info(stdout)
out_value = 0
@@ -75,11 +79,11 @@ def do_test():
loop_value = loop_value + 1
with open(out_file) as f:
data = json.load(f)
- if data["status"] == 1:
+ if data["result"]["criteria"] == "PASS":
LOG.info("yardstick run success")
out_value = 1
break
- elif data["status"] == 2:
+ else:
LOG.error("yardstick error exit")
out_value = 0
break
diff --git a/testsuites/posca/testcase_script/posca_factor_soak_throughputs.py b/testsuites/posca/testcase_script/posca_factor_soak_throughputs.py
new file mode 100644
index 00000000..2fd35006
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_factor_soak_throughputs.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file is to do data-plane baseline test for
+VM pair life-cycle events using netperf.
+Testing steps are summarized below:
+1. run_test load testcase configuration
+2. Bottlenecks eliminates the environments limits/constraints
+3. Bottlenecks tells Yardstick to prepare environment
+4. Bottlenecks tells Yardstick to run test
+ 3.1 to create stack
+ 3.2 to install netperf
+ 3.3 to send/forward packets for t2 seconds
+ 3.4 record results and destroy stack
+ 3.5 after every t1 seconds goto 3.1 and repeat the workflow
+5. Bottlenecks collects testing results from Yardstick
+6. Bottlenecks tells Yardstick to stop when time ends
+ or system fails the test
+7. Bottlenecks sends testing data to bottlenecks-elk'''
+
+import utils.logger as log
+import uuid
+import json
+import os
+import time
+import threading
+import datetime
+import Queue
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.quota_prepare as quota_prepare
+import utils.env_prepare.stack_prepare as stack_prepare
+import utils.infra_setup.runner.yardstick as runner_yardstick
+import utils.infra_setup.runner.docker_env as docker_env
+import math
+
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+test_dict = {
+ "action": "runTestCase",
+ "args": {
+ "opts": {
+ "task-args": {}
+ },
+ "testcase": "netperf_bottlenecks"
+ }
+}
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+cidr = "/home/opnfv/repos/yardstick/samples/netperf_soak.yaml"
+runner_DEBUG = True
+
+q = Queue.Queue()
+
+
+def env_pre(test_config):
+ test_yardstick = False
+ if "yardstick" in test_config["contexts"].keys():
+ test_yardstick = True
+ stack_prepare._prepare_env_daemon(test_yardstick)
+ quota_prepare.quota_env_prepare()
+ LOG.info("yardstick environment prepare!")
+ if(test_config["contexts"]['yardstick_envpre']):
+ stdout = runner_yardstick.yardstick_image_prepare()
+ LOG.debug(stdout)
+
+
+def do_test(con_dic):
+ out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+ parameter_info = dict(test_time=con_dic["scenarios"]["vim_pair_ttl"])
+ yardstick_container = docker_env.yardstick_info['container']
+ cmd = runner_yardstick.yardstick_command_parser(debug=runner_DEBUG,
+ cidr=cidr,
+ outfile=out_file,
+ parameter=parameter_info)
+ stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+ LOG.info(stdout)
+ out_value = 0
+ loop_value = 0
+ while loop_value < 60:
+ time.sleep(2)
+ loop_value = loop_value + 1
+ with open(out_file) as f:
+ data = json.load(f)
+ if data["result"]["criteria"] == "PASS":
+ LOG.info("Success run yardstick netperf_soak test!")
+ out_value = 1
+ break
+ elif data["result"]["criteria"] == "FAIL":
+ LOG.error("Failed run yardstick netperf_soak test!")
+ out_value = 0
+ break
+ q.put((out_value, data["result"]["testcases"]))
+ return out_value
+
+
+def config_to_result(
+ test_duration, added_duration, vim_pair_ttl,
+ vim_pair_lazy_cre_delay,
+ vim_pair_num, vim_pair_success_num, result):
+ testdata = {}
+ test_result = {}
+ test_result["test_duration"] = test_duration
+ test_result["sum_duration"] = added_duration
+ test_result["vim_pair_ttl"] = vim_pair_ttl
+ test_result["vim_pair_cre_interval"] = vim_pair_lazy_cre_delay
+ test_result["vim_pair_num"] = vim_pair_num
+ test_result["vim_pair_success_num"] = vim_pair_success_num
+ test_result["result"] = result
+ testdata["data_body"] = test_result
+ testdata["testcase"] = testcase
+ return testdata
+
+
+def func_run(con_dic):
+ test_date = do_test(con_dic)
+ return test_date
+
+
+def run(test_config):
+ con_dic = test_config["load_manager"]
+
+ env_pre(test_config)
+ LOG.info("yardstick environment prepare done!")
+
+ test_duration = float(
+ con_dic["scenarios"]["test_duration_hours"]) * 3600
+ vim_pair_ttl = float(
+ con_dic["scenarios"]["vim_pair_ttl"])
+ vim_pair_lazy_cre_delay = float(
+ con_dic["scenarios"]["vim_pair_lazy_cre_delay"])
+ vim_pair_num = int(math.ceil(
+ (test_duration - vim_pair_ttl) / vim_pair_lazy_cre_delay
+ ) + 1)
+
+ threadings = []
+ result = []
+ vim_pair_success_num = 0
+
+ start_time = datetime.datetime.now()
+
+ LOG.info("Data-path test duration are %i seconds", test_duration)
+ LOG.info("TTL of each VM pair are %i seconds", vim_pair_ttl)
+ LOG.info("Creation delay between VM pairs are %i seconds",
+ vim_pair_lazy_cre_delay)
+ LOG.info("Number of VM pairs to be created are %i", vim_pair_num)
+
+ for vim_pair_index in xrange(0, vim_pair_num):
+ index_thread = threading.Thread(target=func_run,
+ args=(con_dic,))
+ threadings.append(index_thread)
+ index_thread.start()
+ vim_pair_error = False
+ for wait_time in xrange(0, int(vim_pair_lazy_cre_delay)):
+ time.sleep(1)
+ while not q.empty():
+ result.append(q.get())
+ for one_result in result:
+ if 0 == one_result[0]:
+ vim_pair_error = True
+ break
+ if vim_pair_error:
+ break
+ for one_thread in threadings:
+ one_thread.join()
+ while not q.empty():
+ result.append(q.get())
+ for item in result:
+ vim_pair_success_num += int(item[0])
+
+ end_time = datetime.datetime.now()
+ added_duration = (end_time - start_time).seconds
+ LOG.info("Number of success VM pairs/threads are %s out %s ",
+ vim_pair_success_num, vim_pair_num)
+
+ return_result = config_to_result(
+ test_duration, added_duration, vim_pair_ttl,
+ vim_pair_lazy_cre_delay,
+ vim_pair_num, vim_pair_success_num, result
+ )
+
+ conf_parser.result_to_file(return_result, test_config["out_file"])
+
+ return vim_pair_error
diff --git a/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py b/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
index 1a54554c..9d8b0ec6 100644
--- a/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
+++ b/testsuites/posca/testcase_script/posca_factor_system_bandwidth.py
@@ -79,10 +79,10 @@ def do_test(test_config, Use_Dashboard, context_conf):
with open(out_file) as f:
data = json.load(f)
if data["status"] == 1:
- LOG.info("yardstick run success")
+ LOG.info("Success run yardstick netperf_bottlenecks test!")
break
elif data["status"] == 2:
- LOG.error("yardstick error exit")
+ LOG.error("Failed to run yardstick netperf_bottlenecks test!")
exit()
save_data = config_to_result(test_config, data['result'][1])
diff --git a/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py b/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py
deleted file mode 100644
index 2241d02f..00000000
--- a/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-"""This file realize the function of run systembandwidth script.
-for example this contain two part first run_script,
-second is algorithm, this part is about how to judge the bottlenecks.
-This test is using yardstick as a tool to begin test."""
-
-import os
-import time
-import utils.logger as log
-import utils.infra_setup.runner.yardstick as Runner
-from utils.parser import Parser as conf_parser
-import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
-# --------------------------------------------------
-# logging configuration
-# --------------------------------------------------
-LOG = log.Logger(__name__).getLogger()
-
-testfile = os.path.basename(__file__)
-testcase, file_format = os.path.splitext(testfile)
-
-
-def env_pre(con_dic):
- Runner.Create_Incluxdb(con_dic['runner_config'])
-
-
-def config_to_result(test_config, test_result):
- testdata = {}
- test_result["throughput"] = float(test_result["throughput"])
- test_result.update(test_config)
- testdata["data_body"] = test_result
- testdata["testcase"] = testcase
- return testdata
-
-
-def do_test(test_config, con_dic):
- test_case = con_dic['runner_config']['yardstick_testcase']
- test_dict = {
- "action": "runTestCase",
- "args": {
- "opts": {
- "task-args": test_config
- },
- "testcase": test_case
- }
- }
- Task_id = Runner.Send_Data(test_dict, con_dic['runner_config'])
- time.sleep(con_dic['test_config']['test_time'])
- Data_Reply = Runner.Get_Reply(con_dic['runner_config'], Task_id)
- try:
- test_date =\
- Data_Reply[con_dic['runner_config']['yardstick_testcase']][0]
- except IndexError:
- test_date = do_test(test_config, con_dic)
-
- save_data = config_to_result(test_config, test_date)
- if con_dic['runner_config']['dashboard'] == 'y':
- DashBoard.dashboard_send_data(con_dic['runner_config'], save_data)
-
- return save_data["data_body"]
-
-
-def run(con_dic):
- # can we specify these ranges from command line?
- low, high = con_dic['test_config']['num_vnfs']
- data = {
- "num_vnfs": range(low, high)
- }
- con_dic["result_file"] = os.path.dirname(
- os.path.abspath(__file__)) + "/test_case/result"
- pre_role_result = 1
- data_return = {}
- data_max = {}
- data_return["throughput"] = 1
-
- if con_dic["runner_config"]["yardstick_test_ip"] is None:
- con_dic["runner_config"]["yardstick_test_ip"] =\
- conf_parser.ip_parser("yardstick_test_ip")
-
- env_pre(con_dic)
-
- if con_dic["runner_config"]["dashboard"] == 'y':
- if con_dic["runner_config"]["dashboard_ip"] is None:
- con_dic["runner_config"]["dashboard_ip"] =\
- conf_parser.ip_parser("dashboard")
- LOG.info("Create Dashboard data")
- DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])
-
- bandwidth_tmp = 1
- # vcpus and mem are scaled together
- for num_vnfs in data["scale_up_values"]:
- data_max["throughput"] = 1
- test_config = {
- "num_vnfs": num_vnfs,
- "test_time": con_dic['test_config']['test_time']
- }
- data_reply = do_test(test_config, con_dic)
- conf_parser.result_to_file(data_reply, con_dic["out_file"])
- # TODO: figure out which KPI to use
- bandwidth = data_reply["throughput"]
- if data_max["throughput"] < bandwidth:
- data_max = data_reply
- if abs(bandwidth_tmp - bandwidth) / float(bandwidth_tmp) < 0.025:
- LOG.info("this group of data has reached top output")
- break
- else:
- pre_reply = data_reply
- bandwidth_tmp = bandwidth
- cur_role_result = float(pre_reply["throughput"])
- if (abs(pre_role_result - cur_role_result) /
- float(pre_role_result) < 0.025):
- LOG.info("The performance increases slowly")
- if data_return["throughput"] < data_max["throughput"]:
- data_return = data_max
- pre_role_result = cur_role_result
- LOG.info("Find bottlenecks of this config")
- LOG.info("The max data is %d", data_return["throughput"])
- return data_return
diff --git a/testsuites/posca/testcase_script/posca_feature_moon_resources.py b/testsuites/posca/testcase_script/posca_feature_moon_resources.py
index 3c66c7b9..8b23824c 100644
--- a/testsuites/posca/testcase_script/posca_feature_moon_resources.py
+++ b/testsuites/posca/testcase_script/posca_feature_moon_resources.py
@@ -18,8 +18,9 @@ import uuid
import json
import utils.logger as log
from utils.parser import Parser as conf_parser
-import utils.env_prepare.stack_prepare as stack_prepare
+import utils.env_prepare.moon_prepare as moon_env
import utils.infra_setup.runner.docker_env as docker_env
+import testsuites.posca.testcase_dashboard.posca_feature_moon as DashBoard
import utils.infra_setup.runner.yardstick as yardstick_task
# --------------------------------------------------
@@ -29,35 +30,35 @@ LOG = log.Logger(__name__).getLogger()
testfile = os.path.basename(__file__)
testcase, file_format = os.path.splitext(testfile)
-# cidr = "/home/opnfv/repos/yardstick/samples/pvp_throughput_bottlenecks.yaml"
runner_DEBUG = True
-def env_pre(con_dic):
- LOG.info("yardstick environment prepare!")
- stack_prepare._prepare_env_daemon(True)
+def env_pre(test_config):
+ if "moon_monitoring" in test_config["contexts"].keys():
+ if test_config["contexts"]['moon_envpre'] is True:
+ moon_environment = test_config["contexts"]['moon_environment']
+ moon_env.moon_envprepare(moon_environment)
+ LOG.info("moon environment prepare!")
def config_to_result(test_config, test_result):
- final_data = []
- print(test_result)
+ final_data = {}
+ final_data["testcase"] = "posca_factor_moon_resources"
+ final_data["test_body"] = []
out_data = test_result["result"]["testcases"]
- test_data = out_data["pvp_throughput_bottlenecks"]["tc_data"]
+ test_data = out_data["moon_resource"]["tc_data"]
for result in test_data:
testdata = {}
- testdata["vcpu"] = test_config["vcpu"]
- testdata["memory"] = test_config["memory"]
- testdata["nrFlows"] = result["data"]["nrFlows"]
- testdata["packet_size"] = result["data"]["packet_size"]
- testdata["throughput"] = result["data"]["throughput_rx_mbps"]
- final_data.append(testdata)
+ testdata["tenant_number"] = int(test_config["tenant_number"])
+ testdata["max_user"] = result["data"]["max_user"]
+ final_data["test_body"].append(testdata)
return final_data
def testcase_parser(runner_conf, out_file="yardstick.out", **parameter_info):
cidr = "/home/opnfv/repos/yardstick/" + \
- runner_conf["yardstick_test_dir"] + \
- runner_conf["yardstick_testcase"]
+ runner_conf["yardstick_test_dir"] + "/" + \
+ runner_conf["yardstick_testcase"] + ".yaml"
cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
cidr=cidr,
outfile=out_file,
@@ -84,24 +85,21 @@ def do_test(runner_conf, test_config, Use_Dashboard, context_conf):
elif data["status"] == 2:
LOG.error("yardstick error exit")
exit()
- # data = json.load(output)
save_data = config_to_result(test_config, data)
if Use_Dashboard is True:
print("use dashboard")
- # DashBoard.dashboard_send_data(context_conf, save_data)
-
- # return save_data["data_body"]
+ DashBoard.dashboard_send_data(context_conf, save_data)
return save_data
def run(test_config):
load_config = test_config["load_manager"]
scenarios_conf = load_config["scenarios"]
- runner_conf = test_config["runners"]
+ runner_conf = load_config["runners"]
+ contexts_conf = test_config["contexts"]
Use_Dashboard = False
-
- env_pre(None)
+ env_pre(test_config)
if test_config["contexts"]["yardstick_ip"] is None:
load_config["contexts"]["yardstick_ip"] =\
conf_parser.ip_parser("yardstick_test_ip")
@@ -112,9 +110,14 @@ def run(test_config):
conf_parser.ip_parser("dashboard")
LOG.info("Create Dashboard data")
Use_Dashboard = True
- # DashBoard.dashboard_system_bandwidth(test_config["contexts"])
+ DashBoard.posca_moon_init(test_config["contexts"])
tenants_conf = conf_parser.str_to_list(scenarios_conf["tenants"])
+ subject_number = int(scenarios_conf["subject_number"])
+ object_number = int(scenarios_conf["object_number"])
+ timeout = scenarios_conf["timeout"]
+ consul_host = contexts_conf["moon_environment"]["ip"]
+ consul_port = contexts_conf["moon_environment"]["consul_port"]
load_config["result_file"] = os.path.dirname(
os.path.abspath(__file__)) + "/test_case/result"
@@ -122,7 +125,13 @@ def run(test_config):
result = []
for tenants in tenants_conf:
- case_config = {"tenants": tenants}
+ print(tenants)
+ case_config = {"tenant_number": tenants,
+ "subject_number": subject_number,
+ "object_number": object_number,
+ "timeout": timeout,
+ "consul_host": consul_host,
+ "consul_port": consul_port}
data_reply = do_test(runner_conf, case_config,
Use_Dashboard, test_config["contexts"])
diff --git a/testsuites/posca/testcase_script/posca_feature_moon_tenants.py b/testsuites/posca/testcase_script/posca_feature_moon_tenants.py
new file mode 100644
index 00000000..e932575c
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_feature_moon_tenants.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realize the function of run systembandwidth script.
+for example this contain two part first run_script,
+second is algorithm, this part is about how to judge the bottlenecks.
+This test is using yardstick as a tool to begin test.'''
+
+import os
+import time
+import uuid
+import Queue
+import multiprocessing
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.moon_prepare as moon_env
+import utils.infra_setup.runner.docker_env as docker_env
+
+import utils.infra_setup.runner.yardstick as yardstick_task
+import testsuites.posca.testcase_dashboard.posca_feature_moon as DashBoard
+
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+runner_DEBUG = True
+manager = multiprocessing.Manager()
+switch = manager.Value('tmp', 0)
+
+
+def env_pre(test_config):
+ if "moon_monitoring" in test_config["contexts"].keys():
+ if test_config["contexts"]['moon_envpre'] is True:
+ moon_environment = test_config["contexts"]['moon_environment']
+ moon_env.moon_envprepare(moon_environment)
+ LOG.info("yardstick environment prepare!")
+
+
+def config_to_result(test_result):
+ final_data = {}
+ final_data["testcase"] = "posca_factor_moon_tenants"
+ final_data["test_body"] = []
+ final_data["test_body"].append(test_result)
+ return final_data
+
+
+def testcase_parser(runner_conf, out_file="yardstick.out", **parameter_info):
+ cidr = "/home/opnfv/repos/yardstick/" + \
+ runner_conf["yardstick_test_dir"] + "/" + \
+ runner_conf["yardstick_testcase"] + ".yaml"
+ cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+ cidr=cidr,
+ outfile=out_file,
+ parameter=parameter_info)
+ return cmd
+
+
+def do_test(runner_conf, test_config, Use_Dashboard, context_conf):
+ yardstick_container = docker_env.yardstick_info['container']
+ out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+ cmd = testcase_parser(runner_conf, out_file=out_file, **test_config)
+ print(cmd)
+ stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+ LOG.info(stdout)
+ switch.value += 1
+ save_date = []
+ return save_date
+
+
+def run(test_config):
+ load_config = test_config["load_manager"]
+ scenarios_conf = load_config["scenarios"]
+ contexts_conf = test_config["contexts"]
+ runner_conf = load_config["runners"]
+ Use_Dashboard = False
+
+ env_pre(test_config)
+ if test_config["contexts"]["yardstick_ip"] is None:
+ load_config["contexts"]["yardstick_ip"] =\
+ conf_parser.ip_parser("yardstick_test_ip")
+
+ if "dashboard" in test_config["contexts"].keys():
+ if test_config["contexts"]["dashboard_ip"] is None:
+ test_config["contexts"]["dashboard_ip"] =\
+ conf_parser.ip_parser("dashboard")
+ LOG.info("Create Dashboard data")
+ Use_Dashboard = True
+ DashBoard.posca_moon_init(test_config["contexts"])
+
+ subject_number = int(scenarios_conf["subject_number"])
+ object_number = int(scenarios_conf["object_number"])
+ timeout = scenarios_conf["timeout"]
+ consul_host = contexts_conf["moon_environment"]["ip"]
+ consul_port = contexts_conf["moon_environment"]["consul_port"]
+
+ initial = scenarios_conf["initial_tenants"]
+ threshhold = scenarios_conf["steps_tenants"]
+ tolerate_time = scenarios_conf["tolerate_time"]
+ case_config = {"subject_number": subject_number,
+ "object_number": object_number,
+ "timeout": timeout,
+ "consul_host": consul_host,
+ "consul_port": consul_port}
+
+ process_queue = Queue.Queue()
+
+ load_config["result_file"] = os.path.dirname(
+ os.path.abspath(__file__)) + "/test_case/result"
+
+ result = 0
+
+ if initial == 0:
+ tenant_number = threshhold
+ else:
+ tenant_number = initial
+ while switch.value == 0:
+ LOG.info("Start %d process", tenant_number)
+ for tenant in range(0, tenant_number):
+ process = multiprocessing.Process(target=do_test,
+ args=(runner_conf,
+ case_config,
+ Use_Dashboard,
+ test_config["contexts"],
+ ))
+ process.start()
+ process_queue.put(process)
+
+ result = result + tenant_number
+ tenant_number = threshhold
+ time.sleep(tolerate_time)
+
+ while process_queue.qsize():
+ process = process_queue.get()
+ process.terminate()
+
+ if result == initial:
+ result = 0
+ else:
+ result = result - threshhold
+
+ testdate = {"tenant_max": result}
+ testresult = config_to_result(testdate)
+ LOG.info("Finished bottlenecks testcase")
+ LOG.info("The result data is %d", result)
+ if Use_Dashboard is True:
+ print "Use Dashboard"
+ DashBoard.dashboard_send_data(test_config["contexts"], testresult)
+
+ return testresult
diff --git a/testsuites/posca/testcase_script/posca_feature_testpmd_scale_up.py b/testsuites/posca/testcase_script/posca_feature_testpmd_scale_up.py
index 830ff73f..08c4cbe9 100644
--- a/testsuites/posca/testcase_script/posca_feature_testpmd_scale_up.py
+++ b/testsuites/posca/testcase_script/posca_feature_testpmd_scale_up.py
@@ -81,14 +81,10 @@ def do_test(test_config, Use_Dashboard, context_conf):
elif data["status"] == 2:
LOG.error("yardstick error exit")
exit()
- # data = json.load(output)
save_data = config_to_result(test_config, data)
if Use_Dashboard is True:
print("use dashboard")
- # DashBoard.dashboard_send_data(context_conf, save_data)
-
- # return save_data["data_body"]
return save_data
@@ -108,7 +104,6 @@ def run(test_config):
conf_parser.ip_parser("dashboard")
LOG.info("Create Dashboard data")
Use_Dashboard = True
- # DashBoard.dashboard_system_bandwidth(test_config["contexts"])
cpus = conf_parser.str_to_list(scenarios_conf["cpus"])
mems = conf_parser.str_to_list(scenarios_conf["mems"])
diff --git a/testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py b/testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py
new file mode 100644
index 00000000..417cf2b9
--- /dev/null
+++ b/testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+"""This file realizes the function of running the systembandwidth script.
+It contains two parts: the first is run_script,
+the second is the algorithm, which judges where the bottlenecks are.
+This test uses yardstick as the tool to run the test."""
+
+import utils.logger as log
+import uuid
+import json
+import os
+import time
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.quota_prepare as quota_prepare
+import utils.env_prepare.stack_prepare as stack_prepare
+
+import utils.infra_setup.runner.docker_env as docker_env
+import utils.infra_setup.runner.yardstick as yardstick_task
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+
+# Yardstick testcase id; config_to_result() reads this key out of the
+# raw result JSON.
+testcase_name = ("tc_heat_rfc2544_ipv4_1rule_"
+                 "1flow_64B_trex_correlated_traffic_scale_out")
+# Name of this script, split into (testcase, extension).
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+# Path of the yardstick task definition (inside the yardstick repo
+# as mounted in the container -- TODO confirm the mount point).
+cidr = ("/home/opnfv/repos/yardstick/samples/vnf_samples/nsut/acl/"
+        "tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_correlated_"
+        "traffic_scale_out.yaml")
+# Passed through to yardstick_command_parser as the debug flag.
+runner_DEBUG = True
+
+
+def env_pre(test_config):
+    """Prepare the test environment before the testcase runs.
+
+    Prepares the stack daemon and project quotas, and optionally the
+    yardstick guest image when contexts['yardstick_envpre'] is truthy.
+    """
+    test_yardstick = False
+    if "yardstick" in test_config["contexts"].keys():
+        test_yardstick = True
+    print(test_yardstick)
+    stack_prepare._prepare_env_daemon(test_yardstick)
+    quota_prepare.quota_env_prepare()
+    LOG.info("yardstick environment prepare!")
+    # Image preparation is skipped when yardstick_envpre is false/absent.
+    if(test_config["contexts"]['yardstick_envpre']):
+        stdout = yardstick_task.yardstick_image_prepare()
+        LOG.debug(stdout)
+
+
+def config_to_result(test_config, test_result):
+ final_data = []
+ print(test_result)
+ out_data = test_result["result"]["testcases"]
+ test_data = out_data[testcase_name]["tc_data"]
+ for result in test_data:
+ testdata = {}
+ testdata["sequence"] = result["sequence"]
+ traffic_result = result["data"]["tg__0"]
+ if traffic_result:
+ testdata["RxThroughput"] = traffic_result["RxThroughput"]
+ testdata["TxThroughput"] = traffic_result["TxThroughput"]
+ testdata["DropPercentage"] = traffic_result["DropPercentage"]
+ final_data.append(testdata)
+ return final_data
+
+
+def testcase_parser(out_file="yardstick.out", **parameter_info):
+    """Build the yardstick CLI command string for this testcase.
+
+    out_file is where yardstick writes its JSON result; any keyword
+    arguments are forwarded as task parameters.
+    """
+    cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+                                                  cidr=cidr,
+                                                  outfile=out_file,
+                                                  parameter=parameter_info)
+    return cmd
+
+
+def do_test(test_config, Use_Dashboard, context_conf):
+    """Run one yardstick scale-out task and return its parsed result.
+
+    Executes the yardstick command inside the yardstick container, then
+    polls the JSON output file (up to 60 tries, 2s apart) until status
+    is 1 (success) or 2 (error; process exits).  The raw result is
+    reshaped via config_to_result().  context_conf is currently unused
+    beyond the dashboard flag path.
+    """
+    yardstick_container = docker_env.yardstick_info['container']
+    # Unique per-run output path so concurrent runs do not collide.
+    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+    cmd = testcase_parser(out_file=out_file, **test_config)
+    print(cmd)
+    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+    LOG.info(stdout)
+    loop_value = 0
+    while loop_value < 60:
+        time.sleep(2)
+        loop_value = loop_value + 1
+        # NOTE(review): open() raises IOError if yardstick has not yet
+        # created out_file on the first poll -- presumably the file is
+        # written immediately by the runner; confirm.
+        with open(out_file) as f:
+            data = json.load(f)
+            if data["status"] == 1:
+                LOG.info("yardstick run success")
+                break
+            elif data["status"] == 2:
+                LOG.error("yardstick error exit")
+                exit()
+
+    # NOTE(review): if the 60-iteration poll times out without status 1
+    # or 2, the last-read data (possibly still running) is used here.
+    save_data = config_to_result(test_config, data)
+    if Use_Dashboard is True:
+        print("use dashboard")
+
+    return save_data
+
+
+def run(test_config):
+    """Entry point for the VNF scale-out testcase.
+
+    Prepares the environment, resolves the yardstick/dashboard IPs when
+    missing, then runs do_test() once per configured VNF count and
+    returns the list of per-run results.
+    """
+    # NOTE: Python 2 print statement; this file mixes print statements
+    # and print() calls.
+    print test_config
+    load_config = test_config["load_manager"]
+    scenarios_conf = load_config["scenarios"]
+    # NOTE(review): initialized to True unconditionally, so the
+    # "dashboard" branch below never changes its value -- other posca
+    # scripts appear to start from False; confirm intent.
+    Use_Dashboard = True
+    env_pre(test_config)
+    if test_config["contexts"]["yardstick_ip"] is None:
+        load_config["contexts"]["yardstick_ip"] =\
+            conf_parser.ip_parser("yardstick_test_ip")
+
+    if "dashboard" in test_config["contexts"].keys():
+        if test_config["contexts"]["dashboard_ip"] is None:
+            test_config["contexts"]["dashboard_ip"] =\
+                conf_parser.ip_parser("dashboard")
+        LOG.info("Create Dashboard data")
+        Use_Dashboard = True
+
+    # number_vnfs is a comma/range string, e.g. "1, 2, 4" -> list.
+    num_vnfs = conf_parser.str_to_list(scenarios_conf["number_vnfs"])
+    iterations = scenarios_conf["iterations"]
+    interval = scenarios_conf["interval"]
+    load_config["result_file"] = os.path.dirname(
+        os.path.abspath(__file__)) + "/test_case/result"
+
+    result = []
+
+    # One yardstick run per configured VNF count.
+    for i in range(0, len(num_vnfs)):
+        print i
+        case_config = {"num_vnfs": int(num_vnfs[i]),
+                       "iterations": iterations,
+                       "interval": interval}
+        data_reply = do_test(case_config, Use_Dashboard,
+                             test_config["contexts"])
+        result.append(data_reply)
+
+    LOG.info("Finished bottlenecks testcase")
+    LOG.info("The result data is %s", result)
+    return result