-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py            88
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_result_checker_general.py       113
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/__init__.py                           25
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/__init__.py            0
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py   88
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py  107
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker_conf.yaml              11
7 files changed, 432 insertions, 0 deletions
diff --git a/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py b/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
new file mode 100644
index 000000000..9972d6b1b
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huan Li and others
+# lihuansse@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.result_checker
+# .baseresultchecker
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.result_checker import baseresultchecker
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.result_checker'
+ '.baseresultchecker.BaseResultChecker')
+class ResultCheckerMgrTestCase(unittest.TestCase):
+
+ def setUp(self):
+ config = {
+ 'checker_type': 'general-result-checker',
+            'key': 'process-checker'
+ }
+
+ self.checker_configs = []
+ self.checker_configs.append(config)
+
+    def test_ResultCheckerMgr_setup_successful(self, mock_basechecker):
+ mgr_ins = baseresultchecker.ResultCheckerMgr()
+ mgr_ins.init_ResultChecker(self.checker_configs, None)
+ mgr_ins.verify()
+
+    def test_getitem_successful(self, mock_basechecker):
+ mgr_ins = baseresultchecker.ResultCheckerMgr()
+ mgr_ins.init_ResultChecker(self.checker_configs, None)
+ checker_ins = mgr_ins["process-checker"]
+
+    def test_getitem_fail(self, mock_basechecker):
+ mgr_ins = baseresultchecker.ResultCheckerMgr()
+ mgr_ins.init_ResultChecker(self.checker_configs, None)
+ with self.assertRaises(KeyError):
+ checker_ins = mgr_ins["checker-not-exist"]
+
+
+class BaseResultCheckerTestCase(unittest.TestCase):
+
+    class ResultCheckerSimple(baseresultchecker.BaseResultChecker):
+        __result_checker__type__ = "ResultCheckerForTest"
+ def setup(self):
+ self.success = False
+
+ def verify(self):
+ return self.success
+
+ def setUp(self):
+ self.checker_cfg = {
+ 'checker_type': 'general-result-checker',
+            'key': 'process-checker'
+ }
+
+ def test_baseresultchecker_setup_verify_successful(self):
+ ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
+ ins.setup()
+ ins.verify()
+
+    def test_baseresultchecker_verify_pass(self):
+ ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
+ ins.setup()
+ ins.actualResult = True
+ ins.expectedResult = True
+ ins.verify()
+
+ def test_get_script_fullpath(self):
+ ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
+ path = ins.get_script_fullpath("test.bash")
+
+ def test_get_resultchecker_cls_successful(self):
+        baseresultchecker.BaseResultChecker.get_resultchecker_cls("ResultCheckerForTest")
+
+ def test_get_resultchecker_cls_fail(self):
+ with self.assertRaises(RuntimeError):
+            baseresultchecker.BaseResultChecker.get_resultchecker_cls("ResultCheckerNotExist")
diff --git a/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py b/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
new file mode 100644
index 000000000..88a9b9d20
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huan Li and others
+# lihuansse@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.result_checker
+# .result_checker_general
+
+import mock
+import unittest
+import copy
+
+from yardstick.benchmark.scenarios.availability.result_checker import result_checker_general
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.result_checker.'
+ 'result_checker_general.ssh')
+@mock.patch('yardstick.benchmark.scenarios.availability.result_checker.'
+ 'result_checker_general.open')
+class GeneralResultCheckerTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.checker_cfg = {
+ 'parameter': {'processname': 'process'},
+ 'checker_type': 'general-result-checker',
+            'condition': 'eq',
+            'expectedValue': 1,
+            'key': 'process-checker',
+ 'host': 'node1'
+ }
+
+ def test__result_checker_eq(self, mock_open, mock_ssh):
+        ins = result_checker_general.GeneralResultChecker(
+            self.checker_cfg, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_gt(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'gt'
+        ins = result_checker_general.GeneralResultChecker(
+            config, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "2", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_gt_eq(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'gt_eq'
+        ins = result_checker_general.GeneralResultChecker(
+            config, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_lt(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'lt'
+        ins = result_checker_general.GeneralResultChecker(
+            config, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "0", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_lt_eq(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'lt_eq'
+        ins = result_checker_general.GeneralResultChecker(
+            config, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_in(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'in'
+ config['expectedValue'] = "value"
+        ins = result_checker_general.GeneralResultChecker(
+            config, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "value return", '')
+ ins.setup()
+ self.assertTrue(ins.verify())
+
+ def test__result_checker_wrong(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config['condition'] = 'wrong'
+        ins = result_checker_general.GeneralResultChecker(
+            config, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ self.assertFalse(ins.verify())
+
+ def test__result_checker_fail(self, mock_open, mock_ssh):
+ config = copy.deepcopy(self.checker_cfg)
+ config.pop('parameter')
+        ins = result_checker_general.GeneralResultChecker(
+            config, self.context)
+ mock_ssh.SSH().execute.return_value = (1, "fail", '')
+ ins.setup()
+        ins.verify()
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/__init__.py b/yardstick/benchmark/scenarios/availability/__init__.py
index e69de29bb..fdad0fe95 100755
--- a/yardstick/benchmark/scenarios/availability/__init__.py
+++ b/yardstick/benchmark/scenarios/availability/__init__.py
@@ -0,0 +1,25 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ActionType:
+ ATTACKER = "attacker"
+ MONITOR = "monitor"
+ RESULTCHECKER = "resultchecker"
+ RESULTCOMPARER = "comparer"
+ OPERATION = "operation"
+
+
+class Condition:
+ EQUAL = "eq"
+ GREATERTHAN = "gt"
+ GREATERTHANEQUAL = "gt_eq"
+ LESSTHAN = "lt"
+ LESSTHANEQUAL = "lt_eq"
+ IN = "in"
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/__init__.py b/yardstick/benchmark/scenarios/availability/result_checker/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker/__init__.py
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py b/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
new file mode 100644
index 000000000..1bdb9f2c2
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
@@ -0,0 +1,88 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import yaml
+import logging
+import os
+
+import yardstick.common.utils as utils
+
+LOG = logging.getLogger(__name__)
+
+resultchecker_conf_path = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ "result_checker_conf.yaml")
+
+
+class ResultCheckerMgr(object):
+
+ def __init__(self):
+ self._result_checker_list = []
+
+ def init_ResultChecker(self, resultchecker_cfgs, context):
+        LOG.debug("ResultCheckerMgr config: %s" % resultchecker_cfgs)
+
+ for cfg in resultchecker_cfgs:
+ resultchecker_type = cfg['checker_type']
+ resultchecker_cls = BaseResultChecker.get_resultchecker_cls(
+ resultchecker_type)
+ resultchecker_ins = resultchecker_cls(cfg, context)
+ resultchecker_ins.key = cfg['key']
+ resultchecker_ins.setup()
+ self._result_checker_list.append(resultchecker_ins)
+
+ def __getitem__(self, item):
+ for obj in self._result_checker_list:
+            if obj.key == item:
+ return obj
+ raise KeyError("No such result checker instance of key - %s" % item)
+
+ def verify(self):
+ result = True
+ for obj in self._result_checker_list:
+ result &= obj.success
+ return result
+
+
+class BaseResultChecker(object):
+
+ resultchecker_cfgs = {}
+
+ def __init__(self, config, context):
+ if not BaseResultChecker.resultchecker_cfgs:
+ with open(resultchecker_conf_path) as stream:
+ BaseResultChecker.resultchecker_cfgs = yaml.load(stream)
+ self.actualResult = object()
+ self.expectedResult = object()
+ self.success = False
+
+ self._config = config
+ self._context = context
+ self.setup_done = False
+
+    @staticmethod
+    def get_resultchecker_cls(checker_type):
+        """Return the resultchecker class registered for the
+        given checker_type string."""
+        for checker_cls in utils.itersubclasses(BaseResultChecker):
+            if checker_type == checker_cls.__result_checker__type__:
+                return checker_cls
+        raise RuntimeError("No such checker type: %s" % checker_type)
+
+ def get_script_fullpath(self, path):
+ base_path = os.path.dirname(resultchecker_conf_path)
+ return os.path.join(base_path, path)
+
+ def setup(self):
+ pass
+
+ def verify(self):
+        if self.actualResult == self.expectedResult:
+ self.success = True
+ return self.success
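Note on the plugin mechanism above: a result checker only has to subclass BaseResultChecker and set __result_checker__type__; get_resultchecker_cls() then locates it via utils.itersubclasses(), and ResultCheckerMgr drives its setup() and verify() calls. Below is a minimal sketch of a custom checker (illustration only, not part of this patch; the DemoResultChecker class and the "demo-checker" type name are invented, and the snippet assumes a yardstick tree that already contains this patch):

# Illustration only: hypothetical checker, not part of this patch.
from yardstick.benchmark.scenarios.availability.result_checker import (
    baseresultchecker)


class DemoResultChecker(baseresultchecker.BaseResultChecker):
    # invented type name; the real checker in this patch uses
    # "general-result-checker"
    __result_checker__type__ = "demo-checker"

    def setup(self):
        # a real checker would collect state here (e.g. over SSH)
        self.expectedResult = "running"
        self.actualResult = "running"

    def verify(self):
        self.success = self.actualResult == self.expectedResult
        return self.success


mgr = baseresultchecker.ResultCheckerMgr()
mgr.init_ResultChecker([{'checker_type': 'demo-checker', 'key': 'demo'}],
                       None)
print(mgr['demo'].verify())    # True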
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
new file mode 100644
index 000000000..70bf9aea6
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
@@ -0,0 +1,107 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from baseresultchecker import BaseResultChecker
+from yardstick.benchmark.scenarios.availability import Condition
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios.availability.util import buildshellparams
+
+LOG = logging.getLogger(__name__)
+
+
+class GeneralResultChecker(BaseResultChecker):
+
+ __result_checker__type__ = "general-result-checker"
+
+ def setup(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+ host = self._context.get(self._config['host'], None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+
+ self.key = self._config['key']
+ self.type = self._config['checker_type']
+ self.condition = self._config['condition']
+ self.expectedResult = self._config['expectedValue']
+ self.actualResult = object()
+
+        self.shell_cmd = None
+ if "parameter" in self._config:
+ parameter = self._config['parameter']
+            str_format = buildshellparams(parameter)
+            values = list(parameter.values())
+            self.shell_cmd = str_format.format(*values)
+
+ self.resultchecker_cfgs = BaseResultChecker.resultchecker_cfgs.get(
+ self.key)
+ self.verify_script = self.get_script_fullpath(
+ self.resultchecker_cfgs['verify_script'])
+
+ def verify(self):
+ if "parameter" in self._config:
+ exit_status, stdout, stderr = self.connection.execute(
+ self.shell_cmd,
+ stdin=open(self.verify_script, "r"))
+            LOG.debug("verify script of the result checker is: {0}"
+                      .format(self.verify_script))
+            LOG.debug("parameters for the result checker are: {0}"
+                      .format(self.shell_cmd))
+ else:
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/bash -s ",
+ stdin=open(self.verify_script, "r"))
+            LOG.debug("verify script of the result checker is: {0}"
+                      .format(self.verify_script))
+
+        LOG.debug("exit_status, stdout: {0}, {1}".format(exit_status, stdout))
+ if exit_status == 0 and stdout:
+ self.actualResult = stdout
+ LOG.debug("verifying resultchecker: {0}".format(self.key))
+            LOG.debug("verifying resultchecker, expected: {0}"
+                      .format(self.expectedResult))
+            LOG.debug("verifying resultchecker, actual: {0}"
+                      .format(self.actualResult))
+            LOG.debug("verifying resultchecker, condition: {0}"
+                      .format(self.condition))
+            if type(self.expectedResult) is int:
+ self.actualResult = int(self.actualResult)
+ if self.condition == Condition.EQUAL:
+ self.success = self.actualResult == self.expectedResult
+ elif self.condition == Condition.GREATERTHAN:
+ self.success = self.actualResult > self.expectedResult
+ elif self.condition == Condition.GREATERTHANEQUAL:
+ self.success = self.actualResult >= self.expectedResult
+ elif self.condition == Condition.LESSTHANEQUAL:
+ self.success = self.actualResult <= self.expectedResult
+ elif self.condition == Condition.LESSTHAN:
+ self.success = self.actualResult < self.expectedResult
+ elif self.condition == Condition.IN:
+ self.success = self.expectedResult in self.actualResult
+ else:
+ self.success = False
+                LOG.debug(
+                    "invalid condition for resultchecker: {0}"
+                    .format(self.key))
+ else:
+ self.success = False
+            LOG.debug(
+                "error while resultchecker {0} was verifying the result"
+                .format(self.key))
+ LOG.error(stderr)
+
+        LOG.debug(
+            "verifying resultchecker: {0}, the result is: {1}"
+            .format(self.key, self.success))
+ return self.success
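The if/elif chain in verify() above maps each Condition constant onto an ordinary Python comparison between actualResult and expectedResult. For reference, an equivalent table-driven sketch (illustration only, not part of this patch):

import operator

from yardstick.benchmark.scenarios.availability import Condition

# each operator is applied as op(actualResult, expectedResult)
CONDITION_OPS = {
    Condition.EQUAL: operator.eq,
    Condition.GREATERTHAN: operator.gt,
    Condition.GREATERTHANEQUAL: operator.ge,
    Condition.LESSTHAN: operator.lt,
    Condition.LESSTHANEQUAL: operator.le,
    # operator.contains(actual, expected) == (expected in actual)
    Condition.IN: operator.contains,
}


def check(condition, actual, expected):
    op = CONDITION_OPS.get(condition)
    return op(actual, expected) if op else False


assert check(Condition.EQUAL, 1, 1)
assert check(Condition.IN, "value returned", "value")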
diff --git a/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml b/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
new file mode 100644
index 000000000..638c39a6e
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
@@ -0,0 +1,11 @@
+---
+# config file mapping HA test result checkers to their verify scripts
+#
+schema: "yardstick:task:0.1"
+
+process-checker:
+ verify_script: ha_tools/check_process_python.bash
+service-checker:
+ verify_script: ha_tools/check_service.bash
+nova-instance-checker:
+  verify_script: ha_tools/nova/show_instances.bash
\ No newline at end of file
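For reference, the relative verify_script paths above are resolved by BaseResultChecker.get_script_fullpath() against the directory that contains result_checker_conf.yaml. A small sketch of that lookup (illustration only; it uses yaml.safe_load where the patch uses yaml.load, and assumes a yardstick install that includes this patch):

import os

import pkg_resources
import yaml

conf_path = pkg_resources.resource_filename(
    "yardstick.benchmark.scenarios.availability",
    "result_checker_conf.yaml")

with open(conf_path) as stream:
    cfgs = yaml.safe_load(stream)

# 'process-checker' is the entry a scenario config selects via its 'key'
script = cfgs["process-checker"]["verify_script"]
print(os.path.join(os.path.dirname(conf_path), script))
# -> .../availability/ha_tools/check_process_python.bash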