-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc019.rst               | 16
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml                   |  2
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_multi.py   | 61
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/basemonitor.py   | 10
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py | 68
5 files changed, 148 insertions(+), 9 deletions(-)
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc019.rst b/docs/testing/user/userguide/opnfv_yardstick_tc019.rst
index 1af502253..57e8ddf79 100644
--- a/docs/testing/user/userguide/opnfv_yardstick_tc019.rst
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc019.rst
@@ -21,8 +21,8 @@ Yardstick Test Case Description TC019
+--------------+--------------------------------------------------------------+
|test method | This test case kills the processes of a specific Openstack |
| | service on a selected control node, then checks whether the |
-| | request of the related Openstack command is OK and the killed|
-| | processes are recovered. |
+| | request of the related Openstack command is OK and the |
+| | killed processes are recovered. |
| | |
+--------------+--------------------------------------------------------------+
|attackers | In this test case, an attacker called "kill-process" is |
@@ -52,8 +52,8 @@ Yardstick Test Case Description TC019
| | |
| | 2. the "process" monitor check whether a process is running |
| | on a specific node, which needs three parameters: |
-| | 1) monitor_type: which used for finding the monitor class and|
-| | related scritps. It should be always set to "process" |
+| | 1) monitor_type: which used for finding the monitor class |
+| | and related scripts. It should always be set to "process" |
| | for this monitor. |
| | 2) process_name: which is the process name for monitor |
| | 3) host: which is the name of the node runing the process |
@@ -61,7 +61,7 @@ Yardstick Test Case Description TC019
| | e.g. |
| | monitor1: |
| | -monitor_type: "openstack-cmd" |
-| | -command_name: "nova image-list" |
+| | -command_name: "openstack server list" |
| | monitor2: |
| | -monitor_type: "process" |
| | -process_name: "nova-api" |
@@ -123,9 +123,9 @@ Yardstick Test Case Description TC019
| | Result: The test case is passed or not. |
| | |
+--------------+--------------------------------------------------------------+
-|post-action | It is the action when the test cases exist. It will check the|
-| | status of the specified process on the host, and restart the |
-| | process if it is not running for next test cases |
+|post-action | It is the action when the test cases exit. It will check |
+| | the status of the specified process on the host, and restart |
+| | the process if it is not running, for the next test cases |
| | |
+--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
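
For orientation, the two monitors described in the table above take configurations
of roughly the following shape. This is a sketch using plain Python dicts (the same
shape the new unit test further down builds); the "nova-api" process name and
"node1" host are illustrative values, not part of this patch:

    # Sketch of the "openstack-cmd" monitor from the e.g. block above.
    monitor1 = {
        "monitor_type": "openstack-cmd",          # selects the command monitor
        "command_name": "openstack server list",  # request that must keep succeeding
    }

    # Sketch of the "process" monitor with its three parameters.
    monitor2 = {
        "monitor_type": "process",   # selects the process monitor
        "process_name": "nova-api",  # process whose recovery is checked (illustrative)
        "host": "node1",             # node name from the context (illustrative)
    }
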
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
index 7c7a0c110..5d3057dc3 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
@@ -26,7 +26,7 @@ scenarios:
wait_time: 10
monitors:
- monitor_type: "openstack-cmd"
- command_name: "openstack image list"
+ command_name: "openstack server list"
monitor_time: 10
monitor_number: 3
sla:
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py b/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
new file mode 100644
index 000000000..9539f27d0
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huan Li and others
+# lihuansse@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor
+# .monitor_multi
+
+from __future__ import absolute_import
+import mock
+import unittest
+from yardstick.benchmark.scenarios.availability.monitor import monitor_multi
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
+            'monitor_general.ssh')
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
+            'monitor_general.open')
+class MultiMonitorServiceTestCase(unittest.TestCase):
+
+    def setUp(self):
+        host = {
+            "ip": "10.20.0.5",
+            "user": "root",
+            "key_filename": "/root/.ssh/id_rsa"
+        }
+        self.context = {"node1": host}
+        self.monitor_cfg = {
+            'monitor_type': 'general-monitor',
+            'monitor_number': 3,
+            'key': 'service-status',
+            'monitor_key': 'service-status',
+            'host': 'node1',
+            'monitor_time': 3,
+            'parameter': {'serviceName': 'haproxy'},
+            'sla': {'max_outage_time': 1}
+        }
+
+    def test__monitor_multi_all_successful(self, mock_open, mock_ssh):
+        ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "running", '')
+
+        ins.start_monitor()
+        ins.wait_monitor()
+        ins.verify_SLA()
+
+    def test__monitor_multi_all_fail(self, mock_open, mock_ssh):
+        ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (1, "failed", '')
+
+        ins.start_monitor()
+        ins.wait_monitor()
+        ins.verify_SLA()
+
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index 3062037ee..a0fc5965b 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -34,6 +34,11 @@ class MonitorMgr(object):
         for monitor_cfg in monitor_cfgs:
             monitor_type = monitor_cfg["monitor_type"]
             monitor_cls = BaseMonitor.get_monitor_cls(monitor_type)
+
+            monitor_number = monitor_cfg.get("monitor_number", 1)
+            if monitor_number > 1:
+                monitor_cls = BaseMonitor.get_monitor_cls("multi-monitor")
+
             monitor_ins = monitor_cls(monitor_cfg, context)
             if "key" in monitor_cfg:
                 monitor_ins.key = monitor_cfg["key"]
@@ -133,6 +138,8 @@ class BaseMonitor(multiprocessing.Process):
         self._queue.put({"total_time": total_time,
                          "outage_time": last_outage - first_outage,
+                         "last_outage": last_outage,
+                         "first_outage": first_outage,
                          "total_count": total_count,
                          "outage_count": outage_count})
@@ -152,3 +159,6 @@ class BaseMonitor(multiprocessing.Process):
     def verify_SLA(self):
         pass
+
+    def result(self):
+        return self._result
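
The two basemonitor.py hunks above work together: MonitorMgr swaps in the
"multi-monitor" class whenever monitor_number is greater than one, and each
underlying monitor now records the absolute timestamps of its outage window,
which result() is meant to expose once the run finishes. A hedged sketch of
the dict a single monitor reports (the numeric values are made up):

    # Illustrative payload a single monitor puts on its queue; only
    # "first_outage" and "last_outage" are new in this patch.
    sample_result = {
        "total_time": 10.0,            # seconds the monitor loop ran
        "outage_time": 2.5,            # last_outage - first_outage
        "first_outage": 1493970000.0,  # time.time() when the first probe failed
        "last_outage": 1493970002.5,   # time.time() when the last probe failed
        "total_count": 20,             # probes issued
        "outage_count": 5,             # probes that failed
    }
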
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
new file mode 100644
index 000000000..8df2ea282
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
@@ -0,0 +1,68 @@
+##############################################################################
+# Copyright (c) 2017 Huan Li and others
+# lihuansse@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import absolute_import
+import logging
+import time
+
+from yardstick.benchmark.scenarios.availability.monitor import basemonitor
+
+LOG = logging.getLogger(__name__)
+
+
+class MultiMonitor(basemonitor.BaseMonitor):
+
+    __monitor_type__ = "multi-monitor"
+
+    def __init__(self, config, context):
+        super(MultiMonitor, self).__init__(config, context)
+
+        self.monitors = []
+        monitor_type = self._config["monitor_type"]
+        monitor_cls = basemonitor.BaseMonitor.get_monitor_cls(monitor_type)
+
+        monitor_number = self._config.get("monitor_number", 1)
+        for i in range(monitor_number):
+            monitor_ins = monitor_cls(self._config, self._context)
+            self.monitors.append(monitor_ins)
+
+    def start_monitor(self):
+        for monitor in self.monitors:
+            monitor.start_monitor()
+
+    def wait_monitor(self):
+        for monitor in self.monitors:
+            monitor.wait_monitor()
+
+    def verify_SLA(self):
+        first_outage = time.time()
+        last_outage = 0
+
+        for monitor in self.monitors:
+            monitor_result = monitor.result()
+            monitor_first_outage = monitor_result.get('first_outage', None)
+            monitor_last_outage = monitor_result.get('last_outage', None)
+
+            if monitor_first_outage is None or monitor_last_outage is None:
+                continue
+
+            if monitor_first_outage < first_outage:
+                first_outage = monitor_first_outage
+
+            if monitor_last_outage > last_outage:
+                last_outage = monitor_last_outage
+        LOG.debug("multi monitor result: %f , %f", first_outage, last_outage)
+
+        outage_time = last_outage - first_outage
+        max_outage_time = self._config["sla"]["max_outage_time"]
+        if outage_time > max_outage_time:
+            LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
+            return False
+        else:
+            return True
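
verify_SLA() above folds the per-monitor windows into one: the earliest
first_outage and the latest last_outage across all spawned monitors bound the
combined outage. A small worked sketch of that aggregation with made-up
timestamps:

    # Worked example of the aggregation done in MultiMonitor.verify_SLA(),
    # with made-up (first_outage, last_outage) pairs from three monitors.
    windows = [
        (100.0, 103.0),   # monitor 1
        (101.5, 104.0),   # monitor 2
        (100.5, 102.0),   # monitor 3
    ]
    first_outage = min(w[0] for w in windows)  # 100.0
    last_outage = max(w[1] for w in windows)   # 104.0
    outage_time = last_outage - first_outage   # 4.0 seconds

    # With e.g. sla: {max_outage_time: 5} the SLA would pass, since 4.0 <= 5.
    max_outage_time = 5
    assert outage_time <= max_outage_time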