summaryrefslogtreecommitdiffstats
path: root/tests
diff options
context:
space:
mode:
Diffstat (limited to 'tests')
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml87
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml43
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml42
-rw-r--r--tests/opnfv/test_suites/fuel_test_suite.yaml12
-rw-r--r--tests/opnfv/test_suites/opnfv_huawei-pod2_daily.yaml2
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py88
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_director.py103
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_result_checker_general.py113
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_scenario_general.py65
-rw-r--r--tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt12
-rw-r--r--tests/unit/benchmark/scenarios/compute/test_cpuload.py161
-rw-r--r--tests/unit/benchmark/scenarios/networking/test_vsperf.py132
12 files changed, 786 insertions, 74 deletions
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml
new file mode 100644
index 000000000..d7406832d
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml
@@ -0,0 +1,87 @@
+---
+# Yardstick TC044 config file
+# Measure memory usage statistics, network throughput, latency and packet loss.
+# Different amounts of flows are tested with, from 2 up to 1001000.
+# All tests are run 2 times each. First 2 times with the least
+# amount of ports, then 2 times with the next amount of ports,
+# and so on until all port amounts have been run with.
+#
+# During the measurements memory usage statistics and network latency are
+# recorded/measured using sar and ping, respectively.
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: MEMORYload
+ run_in_background: true
+
+ options:
+ interval: 1
+ count: 1
+
+ host: demeter.yardstick-TC044
+-
+ type: MEMORYload
+ run_in_background: true
+
+ options:
+ interval: 1
+ count: 1
+
+ host: poseidon.yardstick-TC044
+-
+ type: Ping
+ run_in_background: true
+
+ options:
+ packetsize: 100
+
+ host: demeter.yardstick-TC044
+ target: poseidon.yardstick-TC044
+
+ sla:
+ max_rtt: 10
+ action: monitor
+{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %}
+-
+ type: Pktgen
+ options:
+ packetsize: 64
+ number_of_ports: {{num_ports}}
+ duration: 20
+
+ host: demeter.yardstick-TC044
+ target: poseidon.yardstick-TC044
+
+ runner:
+ type: Iteration
+ iterations: 2
+ interval: 1
+
+ sla:
+ max_ppm: 1000
+ action: monitor
+{% endfor %}
+
+context:
+ name: yardstick-TC044
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ubuntu
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ demeter:
+ floating_ip: true
+ placement: "pgrp1"
+ poseidon:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
new file mode 100644
index 000000000..812d53dd8
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
@@ -0,0 +1,43 @@
+---
+# Test case for TC045: Control node OpenStack service down - neutron server
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "kill-process"
+ process_name: "neutron-server"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "neutron agent-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "process"
+ process_name: "neutron-server"
+ host: node1
+ monitor_time: 10
+ sla:
+ max_recover_time: 5
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
+
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml
new file mode 100644
index 000000000..867553d21
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml
@@ -0,0 +1,42 @@
+---
+# Test case for TC046: Control node OpenStack service down - keystone
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "kill-process"
+ process_name: "keystone"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "keystone user-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "process"
+ process_name: "keystone"
+ host: node1
+ monitor_time: 10
+ sla:
+ max_recover_time: 5
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_suites/fuel_test_suite.yaml b/tests/opnfv/test_suites/fuel_test_suite.yaml
new file mode 100644
index 000000000..016bf0953
--- /dev/null
+++ b/tests/opnfv/test_suites/fuel_test_suite.yaml
@@ -0,0 +1,12 @@
+---
+# Fuel integration test task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "fuel_test_suite"
+test_cases_dir: "samples/"
+test_cases:
+-
+ file_name: ping.yaml
+-
+ file_name: iperf3.yaml
diff --git a/tests/opnfv/test_suites/opnfv_huawei-pod2_daily.yaml b/tests/opnfv/test_suites/opnfv_huawei-pod2_daily.yaml
index 3a3bfccc0..435d21c9e 100644
--- a/tests/opnfv/test_suites/opnfv_huawei-pod2_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_huawei-pod2_daily.yaml
@@ -19,6 +19,4 @@ test_cases:
-
file_name: opnfv_yardstick_tc014.yaml
-
- file_name: opnfv_yardstick_tc027.yaml
--
file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py b/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
new file mode 100644
index 000000000..9972d6b1b
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huan Li and others
+# lihuansse@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.result_checker
+# .baseresultchecker
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.result_checker import baseresultchecker
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.result_checker'
+ '.baseresultchecker.BaseResultChecker')
+class ResultCheckerMgrTestCase(unittest.TestCase):
+
+ def setUp(self):
+ config = {
+ 'checker_type': 'general-result-checker',
+ 'key' : 'process-checker'
+ }
+
+ self.checker_configs = []
+ self.checker_configs.append(config)
+
+ def test_ResultCheckerMgr_setup_successful(self, mock_basechacer):
+ mgr_ins = baseresultchecker.ResultCheckerMgr()
+ mgr_ins.init_ResultChecker(self.checker_configs, None)
+ mgr_ins.verify()
+
+ def test_getitem_succeessful(self, mock_basechacer):
+ mgr_ins = baseresultchecker.ResultCheckerMgr()
+ mgr_ins.init_ResultChecker(self.checker_configs, None)
+ checker_ins = mgr_ins["process-checker"]
+
+ def test_getitem_fail(self, mock_basechacer):
+ mgr_ins = baseresultchecker.ResultCheckerMgr()
+ mgr_ins.init_ResultChecker(self.checker_configs, None)
+ with self.assertRaises(KeyError):
+ checker_ins = mgr_ins["checker-not-exist"]
+
+
+class BaseResultCheckerTestCase(unittest.TestCase):
+
+ class ResultCheckeSimple(baseresultchecker.BaseResultChecker):
+ __result_checker__type__ = "ResultCheckeForTest"
+ def setup(self):
+ self.success = False
+
+ def verify(self):
+ return self.success
+
+ def setUp(self):
+ self.checker_cfg = {
+ 'checker_type': 'general-result-checker',
+ 'key' : 'process-checker'
+ }
+
+ def test_baseresultchecker_setup_verify_successful(self):
+ ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
+ ins.setup()
+ ins.verify()
+
+ def test_baseresultchecker_verfiy_pass(self):
+ ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
+ ins.setup()
+ ins.actualResult = True
+ ins.expectedResult = True
+ ins.verify()
+
+ def test_get_script_fullpath(self):
+ ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
+ path = ins.get_script_fullpath("test.bash")
+
+ def test_get_resultchecker_cls_successful(self):
+ baseresultchecker.BaseResultChecker.get_resultchecker_cls("ResultCheckeForTest")
+
+ def test_get_resultchecker_cls_fail(self):
+ with self.assertRaises(RuntimeError):
+ baseresultchecker.BaseResultChecker.get_resultchecker_cls("ResultCheckeNotExist")
diff --git a/tests/unit/benchmark/scenarios/availability/test_director.py b/tests/unit/benchmark/scenarios/availability/test_director.py
new file mode 100644
index 000000000..887ddd631
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_director.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huan Li and others
+# lihuansse@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.director
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.director import Director
+from yardstick.benchmark.scenarios.availability import actionplayers
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.director.basemonitor')
+@mock.patch('yardstick.benchmark.scenarios.availability.director.baseattacker')
+@mock.patch('yardstick.benchmark.scenarios.availability.director.baseoperation')
+@mock.patch('yardstick.benchmark.scenarios.availability.director.baseresultchecker')
+class DirectorTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.scenario_cfg = {
+ 'type': "general_scenario",
+ 'options': {
+ 'attackers':[{
+ 'fault_type': "general-attacker",
+ 'key': "kill-process"}],
+ 'monitors': [{
+ 'monitor_type': "general-monitor",
+ 'key': "service_status"}],
+ 'operations': [{
+ 'operation_type': 'general-operation',
+ 'key' : 'service_status'}],
+ 'resultCheckers': [{
+ 'checker_type': 'general-result-checker',
+ 'key' : 'process-checker',}],
+ 'steps':[
+ {
+ 'actionKey': "service_status",
+ 'actionType': "operation",
+ 'index': 1},
+ {
+ 'actionKey': "kill-process",
+ 'actionType': "attacker",
+ 'index': 2},
+ {
+ 'actionKey': "process-checker",
+ 'actionType': "resultchecker",
+ 'index': 3},
+ {
+ 'actionKey': "service_status",
+ 'actionType': "monitor",
+ 'index': 4},
+ ]
+ }
+ }
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.ctx = {"nodes": {"node1": host}}
+
+ def test_director_all_successful(self, mock_checer, mock_opertion, mock_attacker, mock_monitor):
+ ins = Director(self.scenario_cfg, self.ctx)
+ opertion_action = ins.createActionPlayer("operation", "service_status")
+ attacker_action = ins.createActionPlayer("attacker", "kill-process")
+ checker_action = ins.createActionPlayer("resultchecker", "process-checker")
+ monitor_action = ins.createActionPlayer("monitor", "service_status")
+
+ opertion_rollback = ins.createActionRollbacker("operation", "service_status")
+ attacker_rollback = ins.createActionRollbacker("attacker", "kill-process")
+ ins.executionSteps.append(opertion_rollback)
+ ins.executionSteps.append(attacker_rollback)
+
+ opertion_action.action()
+ attacker_action.action()
+ checker_action.action()
+ monitor_action.action()
+
+ attacker_rollback.rollback()
+ opertion_rollback.rollback()
+
+ ins.stopMonitors()
+ ins.verify()
+ ins.knockoff()
+
+ def test_director_get_wrong_item(self, mock_checer, mock_opertion, mock_attacker, mock_monitor):
+ ins = Director(self.scenario_cfg, self.ctx)
+ ins.createActionPlayer("wrong_type", "wrong_key")
+ ins.createActionRollbacker("wrong_type", "wrong_key")
+
+
+
+
+
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py b/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
new file mode 100644
index 000000000..88a9b9d20
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huan Li and others
+# lihuansse@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.result_checker
+# .result_checker_general
+
+import mock
+import unittest
+import copy
+
+from yardstick.benchmark.scenarios.availability.result_checker import result_checker_general
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.result_checker.'
+ 'result_checker_general.ssh')
+@mock.patch('yardstick.benchmark.scenarios.availability.result_checker.'
+ 'result_checker_general.open')
+class GeneralResultCheckerTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.checker_cfg = {
+ 'parameter': {'processname': 'process'},
+ 'checker_type': 'general-result-checker',
+ 'condition' : 'eq',
+ 'expectedValue' : 1,
+ 'key' : 'process-checker',
+ 'host': 'node1'
+ }
+
+ def test__result_checker_eq(self, mock_open, mock_ssh):
+ ins = result_checker_general.GeneralResultChecker(self.checker_cfg,
+ self.context);
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_gt(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'gt'
+ ins = result_checker_general.GeneralResultChecker(config,
+ self.context);
+ mock_ssh.SSH().execute.return_value = (0, "2", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_gt_eq(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'gt_eq'
+ ins = result_checker_general.GeneralResultChecker(config,
+ self.context);
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_lt(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'lt'
+ ins = result_checker_general.GeneralResultChecker(config,
+ self.context);
+ mock_ssh.SSH().execute.return_value = (0, "0", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_lt_eq(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'lt_eq'
+ ins = result_checker_general.GeneralResultChecker(config,
+ self.context);
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_in(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'in'
+ config['expectedValue'] = "value"
+ ins = result_checker_general.GeneralResultChecker(config,
+ self.context);
+ mock_ssh.SSH().execute.return_value = (0, "value return", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_wrong(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'wrong'
+ ins = result_checker_general.GeneralResultChecker(config,
+ self.context);
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ self.assertFalse(ins.verify())
+
+ def test__result_checker_fail(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config.pop('parameter')
+ ins = result_checker_general.GeneralResultChecker(config,
+ self.context);
+ mock_ssh.SSH().execute.return_value = (1, "fail", '')
+ ins.setup()
+ ins.verify() \ No newline at end of file
diff --git a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
new file mode 100644
index 000000000..c17edea45
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huan Li and others
+# lihuansse@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.scenario_general
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.scenario_general import ScenarioGeneral
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.scenario_general.Director')
+class ScenarioGeneralTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.scenario_cfg = {
+ 'type': "general_scenario",
+ 'options': {
+ 'attackers':[{
+ 'fault_type': "general-attacker",
+ 'key': "kill-process"}],
+ 'monitors': [{
+ 'monitor_type': "general-monitor",
+ 'key': "service_status"}],
+ 'steps':[
+ {
+ 'actionKey': "kill-process",
+ 'actionType': "attacker",
+ 'index': 1},
+ {
+ 'actionKey': "service_status",
+ 'actionType': "monitor",
+ 'index': 2}]
+ }
+ }
+
+ def test_scenario_general_all_successful(self, mock_director):
+ ins = ScenarioGeneral(self.scenario_cfg, None)
+ ins.setup()
+ ins.run(None)
+ ins.teardown()
+
+ def test_scenario_general_exception(self, mock_director):
+ ins = ScenarioGeneral(self.scenario_cfg, None)
+ mock_obj = mock.Mock()
+ mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
+ ins.director = mock_obj
+ ins.run(None)
+ ins.teardown()
+
+ def test_scenario_general_case_fail(self, mock_director):
+ ins = ScenarioGeneral(self.scenario_cfg, None)
+ mock_obj = mock.Mock()
+ mock_obj.verify.return_value = False
+ ins.director = mock_obj
+ ins.run(None)
+ ins.teardown() \ No newline at end of file
diff --git a/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt b/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt
index b1723ae17..723e64bcb 100644
--- a/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt
+++ b/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt
@@ -1,5 +1,9 @@
-Linux 3.13.0-68-generic (elxg482ls42) 11/30/2015 _x86_64_ (12 CPU)
+Linux 3.13.0-68-generic (elxg482ls42) 11/30/2015 _x86_64_ (1 CPU)
-04:53:04 PM CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
-04:53:04 PM all 11.31 0.03 1.19 0.18 0.00 0.01 0.00 5.51 0.00 81.77
-04:53:04 PM 0 20.03 0.03 1.36 0.33 0.00 0.06 0.00 6.62 0.00 71.56
+04:34:26 PM CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
+04:34:26 PM all 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
+04:34:26 PM 0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
+
+Average: CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
+Average: all 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
+Average: 0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
diff --git a/tests/unit/benchmark/scenarios/compute/test_cpuload.py b/tests/unit/benchmark/scenarios/compute/test_cpuload.py
index 22c4419b2..77f2a02d8 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cpuload.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cpuload.py
@@ -33,7 +33,14 @@ class CPULoadTestCase(unittest.TestCase):
self.result = {}
def test_setup_mpstat_installed(self, mock_ssh):
- l = cpuload.CPULoad({}, self.ctx)
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
l.setup()
@@ -42,7 +49,14 @@ class CPULoadTestCase(unittest.TestCase):
self.assertTrue(l.has_mpstat)
def test_setup_mpstat_not_installed(self, mock_ssh):
- l = cpuload.CPULoad({}, self.ctx)
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
mock_ssh.SSH().execute.return_value = (127, '', '')
l.setup()
@@ -51,7 +65,14 @@ class CPULoadTestCase(unittest.TestCase):
self.assertFalse(l.has_mpstat)
def test_execute_command_success(self, mock_ssh):
- l = cpuload.CPULoad({}, self.ctx)
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
l.setup()
@@ -61,7 +82,14 @@ class CPULoadTestCase(unittest.TestCase):
self.assertEqual(result, expected_result)
def test_execute_command_failed(self, mock_ssh):
- l = cpuload.CPULoad({}, self.ctx)
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
l.setup()
@@ -70,7 +98,14 @@ class CPULoadTestCase(unittest.TestCase):
"cat /proc/loadavg")
def test_get_loadavg(self, mock_ssh):
- l = cpuload.CPULoad({}, self.ctx)
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
l.setup()
@@ -82,44 +117,63 @@ class CPULoadTestCase(unittest.TestCase):
self.assertEqual(result, expected_result)
def test_get_cpu_usage_mpstat(self, mock_ssh):
- l = cpuload.CPULoad({}, self.ctx)
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
l.setup()
- l.interval = 0
+ l.interval = 1
+ l.count = 1
mpstat_output = self._read_file("cpuload_sample_output1.txt")
mock_ssh.SSH().execute.return_value = (0, mpstat_output, '')
result = l._get_cpu_usage_mpstat()
expected_result = \
- {'mpstat':
- {'cpu':
- {'%gnice': '0.00',
- '%guest': '5.51',
- '%idle': '81.77',
- '%iowait': '0.18',
- '%irq': '0.00',
- '%nice': '0.03',
- '%soft': '0.01',
- '%steal': '0.00',
- '%sys': '1.19',
- '%usr': '11.31'},
- 'cpu0':
- {'%gnice': '0.00',
- '%guest': '6.62',
- '%idle': '71.56',
- '%iowait': '0.33',
- '%irq': '0.00',
- '%nice': '0.03',
- '%soft': '0.06',
- '%steal': '0.00',
- '%sys': '1.36',
- '%usr': '20.03'}}}
+ {"mpstat_minimum":
+ {"cpu": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"},
+ "cpu0": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"}},
+ "mpstat_average":
+ {"cpu": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"},
+ "cpu0": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"}},
+ "mpstat_maximun":
+ {"cpu": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"},
+ "cpu0": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"}}}
self.assertDictEqual(result, expected_result)
def test_get_cpu_usage(self, mock_ssh):
- l = cpuload.CPULoad({}, self.ctx)
+ options = {
+ "interval": 0,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
l.setup()
@@ -154,45 +208,16 @@ class CPULoadTestCase(unittest.TestCase):
'%nice': '0.03'}}}
self.assertDictEqual(result, expected_result)
+
+ def test_run_proc_stat(self, mock_ssh):
+ options = {
+ "interval": 1,
+ "count": 1
+ }
- def test_run_mpstat(self, mock_ssh):
- l = cpuload.CPULoad({'options': {'interval': 1}}, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, '', '')
-
- mpstat_output = self._read_file("cpuload_sample_output1.txt")
- mock_ssh.SSH().execute.side_effect = \
- [(0, '', ''), (0, '1.50 1.45 1.51 3/813 14322', ''), (0, mpstat_output, '')]
-
- l.run(self.result)
-
- expected_result = {
- 'loadavg': ['1.50', '1.45', '1.51', '3/813', '14322'],
- 'mpstat':
- {'cpu': {'%gnice': '0.00',
- '%guest': '5.51',
- '%idle': '81.77',
- '%iowait': '0.18',
- '%irq': '0.00',
- '%nice': '0.03',
- '%soft': '0.01',
- '%steal': '0.00',
- '%sys': '1.19',
- '%usr': '11.31'},
- 'cpu0': {'%gnice': '0.00',
- '%guest': '6.62',
- '%idle': '71.56',
- '%iowait': '0.33',
- '%irq': '0.00',
- '%nice': '0.03',
- '%soft': '0.06',
- '%steal': '0.00',
- '%sys': '1.36',
- '%usr': '20.03'}}}
-
- self.assertDictEqual(self.result, expected_result)
+ args = {'options': options}
- def test_run_proc_stat(self, mock_ssh):
- l = cpuload.CPULoad({}, self.ctx)
+ l = cpuload.CPULoad(args, self.ctx)
mock_ssh.SSH().execute.return_value = (1, '', '')
l.setup()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/tests/unit/benchmark/scenarios/networking/test_vsperf.py
new file mode 100644
index 000000000..cb5c09ab3
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Unittest for yardstick.benchmark.scenarios.networking.vsperf.Vsperf
+
+import mock
+import unittest
+import os
+import subprocess
+
+from yardstick.benchmark.scenarios.networking import vsperf
+
+
+@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.subprocess')
+@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.ssh')
+@mock.patch("__builtin__.open", return_value=None)
+class VsperfTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ "host": {
+ "ip": "10.229.47.137",
+ "user": "ubuntu",
+ "password": "ubuntu",
+ },
+ }
+ self.args = {
+ 'options': {
+ 'testname': 'rfc2544_p2p_continuous',
+ 'traffic_type': 'continuous',
+ 'pkt_sizes': '64',
+ 'bidirectional': 'True',
+ 'iload': 100,
+ 'duration': 29,
+ 'trafficgen_port1': 'eth1',
+ 'trafficgen_port2': 'eth3',
+ 'external_bridge': 'br-ex',
+ 'conf-file': 'vsperf-yardstick.conf',
+ 'setup-script': 'setup_yardstick.sh',
+ },
+ 'sla': {
+ 'metrics': 'throughput_rx_fps',
+ 'throughput_rx_fps': 500000,
+ 'action': 'monitor',
+ }
+ }
+
+ def test_vsperf_setup(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ p.setup()
+ self.assertIsNotNone(p.client)
+ self.assertEqual(p.setup_done, True)
+
+ def test_vsperf_teardown(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+
+ # setup() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ p.setup()
+ self.assertIsNotNone(p.client)
+ self.assertEqual(p.setup_done, True)
+
+ p.teardown()
+ self.assertEqual(p.setup_done, False)
+
+ def test_vsperf_run_ok(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+
+ # setup() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ # run() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_ssh.SSH().execute.return_value = (0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+
+ result = {}
+ p.run(result)
+
+ self.assertEqual(result['throughput_rx_fps'], '14797660.000')
+
+ def test_vsperf_run_falied_vsperf_execution(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+
+ # setup() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ # run() specific mocks
+ mock_ssh.SSH().execute.return_value = (1, '', '')
+
+ result = {}
+ self.assertRaises(RuntimeError, p.run, result)
+
+ def test_vsperf_run_falied_csv_report(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+
+ # setup() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ # run() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_ssh.SSH().execute.return_value = (1, '', '')
+
+ result = {}
+ self.assertRaises(RuntimeError, p.run, result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()