path: root/yardstick/benchmark/scenarios/availability
Diffstat (limited to 'yardstick/benchmark/scenarios/availability')
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py  32
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_process.py  15
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py  3
-rw-r--r--  yardstick/benchmark/scenarios/availability/director.py  22
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash  4
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/add_server_to_existing_secgroup.bash  26
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/create_instance_from_image.bash  26
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_instance.bash  24
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_privateip.bash  24
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/remove_server_from_secgroup.bash  25
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash  13
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/basemonitor.py  2
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_command.py  12
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py  28
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_process.py  20
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation_conf.yaml  11
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py  34
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py  41
18 files changed, 228 insertions, 134 deletions
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
index 979e3ab14..4c79a4931 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -23,7 +23,7 @@ def _execute_shell_command(command, stdin=None):
output = []
try:
output = subprocess.check_output(command, stdin=stdin, shell=True)
- except Exception:
+ except Exception: # pylint: disable=broad-except
exitcode = -1
LOG.error("exec command '%s' error:\n ", command, exc_info=True)
@@ -34,6 +34,8 @@ class BaremetalAttacker(BaseAttacker):
__attacker_type__ = 'bare-metal-down'
def setup(self):
+ # a bare-metal-down attack needs to recover even if the SLA passes
+ self.mandatory = True
LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
@@ -49,8 +51,7 @@ class BaremetalAttacker(BaseAttacker):
LOG.debug("jump_host ip:%s user:%s", jump_host['ip'], jump_host['user'])
self.jump_connection = ssh.SSH.from_node(
jump_host,
- # why do we allow pwd for password?
- defaults={"user": "root", "password": jump_host.get("pwd")}
+ defaults={"user": "root", "password": jump_host.get("password")}
)
self.jump_connection.wait(timeout=600)
LOG.debug("ssh jump host success!")
@@ -59,7 +60,7 @@ class BaremetalAttacker(BaseAttacker):
self.ipmi_ip = host.get("ipmi_ip", None)
self.ipmi_user = host.get("ipmi_user", "root")
- self.ipmi_pwd = host.get("ipmi_pwd", None)
+ self.ipmi_pwd = host.get("ipmi_password", None)
self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down')
self.check_script = self.get_script_fullpath(
@@ -107,26 +108,3 @@ class BaremetalAttacker(BaseAttacker):
else:
_execute_shell_command(cmd, stdin=stdin_file)
LOG.info("Recover fault END")
-
-
-def _test(): # pragma: no cover
- host = {
- "ipmi_ip": "10.20.0.5",
- "ipmi_user": "root",
- "ipmi_pwd": "123456",
- "ip": "10.20.0.5",
- "user": "root",
- "key_filename": "/root/.ssh/id_rsa"
- }
- context = {"node1": host}
- attacker_cfg = {
- 'fault_type': 'bear-metal-down',
- 'host': 'node1',
- }
- ins = BaremetalAttacker(attacker_cfg, context)
- ins.setup()
- ins.inject_fault()
-
-
-if __name__ == '__main__': # pragma: no cover
- _test()
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index cb171eafa..7f1136c08 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -42,29 +42,28 @@ class ProcessAttacker(BaseAttacker):
def check(self):
with open(self.check_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ _, stdout, stderr = self.connection.execute(
"sudo /bin/sh -s {0}".format(self.service_name),
stdin=stdin_file)
if stdout:
- LOG.info("check the environment success!")
+ LOG.info("Check the environment success!")
return int(stdout.strip('\n'))
else:
- LOG.error(
- "the host environment is error, stdout:%s, stderr:%s",
- stdout, stderr)
+ LOG.error("Error checking the host environment, "
+ "stdout:%s, stderr:%s", stdout, stderr)
return False
def inject_fault(self):
with open(self.inject_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ self.connection.execute(
"sudo /bin/sh -s {0}".format(self.service_name),
stdin=stdin_file)
def recover(self):
with open(self.recovery_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ exit_status, _, _ = self.connection.execute(
"sudo /bin/bash -s {0} ".format(self.service_name),
stdin=stdin_file)
if exit_status:
- LOG.info("Fail to restart service!")
+ LOG.info("Failed to restart service: %s", self.recovery_script)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index d03d04420..7871cc918 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -63,6 +63,7 @@ class BaseAttacker(object):
self.data = {}
self.setup_done = False
self.intermediate_variables = {}
+ self.mandatory = False
@staticmethod
def get_attacker_cls(attacker_cfg):
@@ -71,7 +72,7 @@ class BaseAttacker(object):
for attacker_cls in utils.itersubclasses(BaseAttacker):
if attacker_type == attacker_cls.__attacker_type__:
return attacker_cls
- raise RuntimeError("No such runner_type %s" % attacker_type)
+ raise RuntimeError("No such runner_type: %s" % attacker_type)
def get_script_fullpath(self, path):
base_path = os.path.dirname(attacker_conf_path)
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
index 71690c135..6cc0cb286 100644
--- a/yardstick/benchmark/scenarios/availability/director.py
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -40,7 +40,7 @@ class Director(object):
nodes = self.context_cfg.get("nodes", None)
# setup attackers
if "attackers" in self.scenario_cfg["options"]:
- LOG.debug("start init attackers...")
+ LOG.debug("Start init attackers...")
attacker_cfgs = self.scenario_cfg["options"]["attackers"]
self.attackerMgr = baseattacker.AttackerMgr()
self.data = self.attackerMgr.init_attackers(attacker_cfgs,
@@ -48,19 +48,19 @@ class Director(object):
# setup monitors
if "monitors" in self.scenario_cfg["options"]:
- LOG.debug("start init monitors...")
+ LOG.debug("Start init monitors...")
monitor_cfgs = self.scenario_cfg["options"]["monitors"]
self.monitorMgr = basemonitor.MonitorMgr(self.data)
self.monitorMgr.init_monitors(monitor_cfgs, nodes)
# setup operations
if "operations" in self.scenario_cfg["options"]:
- LOG.debug("start init operations...")
+ LOG.debug("Start init operations...")
operation_cfgs = self.scenario_cfg["options"]["operations"]
self.operationMgr = baseoperation.OperationMgr()
self.operationMgr.init_operations(operation_cfgs, nodes)
# setup result checker
if "resultCheckers" in self.scenario_cfg["options"]:
- LOG.debug("start init resultCheckers...")
+ LOG.debug("Start init resultCheckers...")
result_check_cfgs = self.scenario_cfg["options"]["resultCheckers"]
self.resultCheckerMgr = baseresultchecker.ResultCheckerMgr()
self.resultCheckerMgr.init_ResultChecker(result_check_cfgs, nodes)
@@ -69,7 +69,7 @@ class Director(object):
if intermediate_variables is None:
intermediate_variables = {}
LOG.debug(
- "the type of current action is %s, the key is %s", type, key)
+ "The type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionplayers.AttackerPlayer(self.attackerMgr[key], intermediate_variables)
if type == ActionType.MONITOR:
@@ -80,17 +80,17 @@ class Director(object):
if type == ActionType.OPERATION:
return actionplayers.OperationPlayer(self.operationMgr[key],
intermediate_variables)
- LOG.debug("something run when creatactionplayer")
+ LOG.debug("The type is not recognized by createActionPlayer")
def createActionRollbacker(self, type, key):
LOG.debug(
- "the type of current action is %s, the key is %s", type, key)
+ "The type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
if type == ActionType.OPERATION:
return actionrollbackers.OperationRollbacker(
self.operationMgr[key])
- LOG.debug("no rollbacker created for %s", key)
+ LOG.debug("No rollbacker created for key: %s", key)
def verify(self):
result = True
@@ -99,7 +99,7 @@ class Director(object):
if hasattr(self, 'resultCheckerMgr'):
result &= self.resultCheckerMgr.verify()
if result:
- LOG.debug("monitors are passed")
+ LOG.debug("Monitor results are passed")
return result
def stopMonitors(self):
@@ -107,12 +107,12 @@ class Director(object):
self.monitorMgr.wait_monitors()
def knockoff(self):
- LOG.debug("knock off ....")
+ LOG.debug("Knock off ....")
while self.executionSteps:
singleStep = self.executionSteps.pop()
singleStep.rollback()
def store_result(self, result):
- LOG.debug("store result ....")
+ LOG.debug("Store result ....")
if hasattr(self, 'monitorMgr'):
self.monitorMgr.store_result(result)
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
index d34ce9338..cda469cf9 100755
--- a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
@@ -16,7 +16,7 @@ set -e
process_name=$1
if [ "$process_name" = "keystone" ]; then
- for pid in $(ps aux | grep "keystone" | grep -iv heartbeat | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}'); \
+ for pid in $(ps aux | grep "keystone" | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}'); \
do
kill -9 "${pid}"
done
@@ -26,7 +26,7 @@ elif [ "$process_name" = "haproxy" ]; then
kill -9 "${pid}"
done
else
- for pid in $(pgrep -fa [^-_a-zA-Z0-9]${process_name} | grep -iv heartbeat | awk '{print $1}');
+ for pid in $(pgrep -fa [^-_a-zA-Z0-9]${process_name} | awk '{print $1}');
do
kill -9 "${pid}"
done
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/add_server_to_existing_secgroup.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/add_server_to_existing_secgroup.bash
new file mode 100644
index 000000000..3a50626f5
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/add_server_to_existing_secgroup.bash
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# add server to existing security group
+# parameters: $1 - server name, $2 - security group name
+
+set -e
+
+if [ $OS_INSECURE ] && [ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+SECGROUPNAME="$(openstack ${SECURE} security group list -f value -c Name | grep $2)"
+
+openstack ${SECURE} server add security group $1 ${SECGROUPNAME}
+
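A minimal usage sketch for the new helper, assuming an OpenStack credentials file (here called openrc, an assumed name) has been prepared; the server and security group names are placeholders:

    # load OpenStack credentials first (file name is an assumption)
    source openrc
    # optional: the script adds --insecure to openstack calls when OS_INSECURE=true
    export OS_INSECURE=true
    bash add_server_to_existing_secgroup.bash yardstick-server yardstick-secgroup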
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_instance_from_image.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_instance_from_image.bash
new file mode 100644
index 000000000..5e0b1ccf1
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_instance_from_image.bash
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# create nova server
+# parameters: $1 - server name, $2 - image name, $3 - flavor name, $4 - network name
+
+set -e
+
+if [ $OS_INSECURE ] && [ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+NETNAME="$(openstack ${SECURE} network list -f value -c Name | grep $4)"
+
+openstack ${SECURE} server create $1 --image $2 --flavor $3 --network ${NETNAME}
+
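A usage sketch under the same assumptions (credentials sourced, placeholder names); the arguments follow the order documented in the script header:

    # $1 server name, $2 image, $3 flavor, $4 network -- all names are placeholders
    bash create_instance_from_image.bash yardstick-server cirros-0.4.0 yardstick-flavor yardstick-net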
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_instance.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_instance.bash
new file mode 100644
index 000000000..008e7f5ff
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_instance.bash
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# delete nova server
+# parameters: $1 - server name
+
+set -e
+
+if [ $OS_INSECURE ] && [ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack ${SECURE} server delete $1
+
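A usage sketch with a placeholder server name; because of set -e the call fails if the server does not exist:

    bash delete_instance.bash yardstick-server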
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_privateip.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_privateip.bash
new file mode 100644
index 000000000..7f2bad540
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_privateip.bash
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# get private ip of a server
+# parameter: $1 - server name
+
+set -e
+
+if [ $OS_INSECURE ] && [ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack ${SECURE} server list -f value -c Name -c Networks | grep $1 | awk '{print $2}' | sed -r 's/.*=([0-9\.\:]+)[;,]*/\1/'
+
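A sketch showing how the printed address could be captured, with a placeholder server name:

    # the script prints the private IP parsed from 'openstack server list'
    PRIVATE_IP=$(bash get_server_privateip.bash yardstick-server)
    echo "${PRIVATE_IP}"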
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/remove_server_from_secgroup.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/remove_server_from_secgroup.bash
new file mode 100644
index 000000000..61d0a2b49
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/remove_server_from_secgroup.bash
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# remove server from existing security group
+# parameters: $1 - server name, $2 - security group name
+
+set -e
+
+if [ $OS_INSECURE ] && [ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+SECGROUPNAME="$(openstack ${SECURE} security group list -f value -c Name | grep $2)"
+
+openstack ${SECURE} server remove security group $1 ${SECGROUPNAME}
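A usage sketch mirroring the add-to-secgroup helper above, again with placeholder names:

    bash remove_server_from_secgroup.bash yardstick-server yardstick-secgroup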
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
index 858d86ca0..2388507d7 100755
--- a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
@@ -9,24 +9,23 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Start a service and check the service is started
+# Start or restart a service and check the service is started
set -e
service_name=$1
+operation=${2-start} # values are "start" or "restart"
-Distributor=$(lsb_release -a | grep "Distributor ID" | awk '{print $3}')
-
-if [ "$Distributor" != "Ubuntu" -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" ]; then
+if [ -f /usr/bin/yum -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" -a "$service_name" != "openvswitch" ]; then
service_name="openstack-"${service_name}
-elif [ "$Distributor" = "Ubuntu" -a "$service_name" = "keystone" ]; then
+elif [ -f /usr/bin/apt -a "$service_name" = "keystone" ]; then
service_name="apache2"
elif [ "$service_name" = "keystone" ]; then
service_name="httpd"
fi
if which systemctl 2>/dev/null; then
- systemctl start $service_name
+ systemctl $operation $service_name
else
- service $service_name start
+ service $service_name $operation
fi
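A usage sketch for the reworked script; the service name is a placeholder, and the optional second argument now selects the systemctl/service verb (defaulting to start):

    # run as root; Yardstick itself feeds the script over ssh stdin, roughly:
    #   sudo /bin/bash -s nova-api restart < start_service.bash
    sudo bash start_service.bash nova-api restart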
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index 50a63f53d..f6004c774 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -103,7 +103,7 @@ class BaseMonitor(multiprocessing.Process):
for monitor in utils.itersubclasses(BaseMonitor):
if monitor_type == monitor.__monitor_type__:
return monitor
- raise RuntimeError("No such monitor_type %s" % monitor_type)
+ raise RuntimeError("No such monitor_type: %s" % monitor_type)
def get_script_fullpath(self, path):
base_path = os.path.dirname(monitor_conf_path)
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index d0551bf03..3b36c762d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -24,7 +24,7 @@ def _execute_shell_command(command):
output = []
try:
output = subprocess.check_output(command, shell=True)
- except Exception:
+ except Exception: # pylint: disable=broad-except
exitcode = -1
LOG.error("exec command '%s' error:\n ", command, exc_info=True)
@@ -45,7 +45,7 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.connection = ssh.SSH.from_node(host,
defaults={"user": "root"})
self.connection.wait(timeout=600)
- LOG.debug("ssh host success!")
+ LOG.debug("ssh host (%s) success!", str(host))
self.check_script = self.get_script_fullpath(
"ha_tools/check_openstack_cmd.bash")
@@ -61,22 +61,20 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.cmd = self.cmd + " --insecure"
def monitor_func(self):
- exit_status = 0
exit_status, stdout = _execute_shell_command(self.cmd)
- LOG.debug("Execute command '%s' and the stdout is:\n%s", self.cmd, stdout)
+ LOG.debug("Executed command '%s'. "
+ "The stdout is:\n%s", self.cmd, stdout)
if exit_status:
return False
return True
def verify_SLA(self):
outage_time = self._result.get('outage_time', None)
- LOG.debug("the _result:%s", self._result)
max_outage_time = self._config["sla"]["max_outage_time"]
if outage_time > max_outage_time:
LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
- LOG.info("the sla is passed")
return True
@@ -97,7 +95,7 @@ def _test(): # pragma: no cover
}
monitor_configs.append(config)
- p = basemonitor.MonitorMgr()
+ p = basemonitor.MonitorMgr({})
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
index dce69f45f..8f1f53cde 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
@@ -29,7 +29,7 @@ class MultiMonitor(basemonitor.BaseMonitor):
monitor_cls = basemonitor.BaseMonitor.get_monitor_cls(monitor_type)
monitor_number = self._config.get("monitor_number", 1)
- for i in range(monitor_number):
+ for _ in range(monitor_number):
monitor_ins = monitor_cls(self._config, self._context,
self.monitor_data)
self.monitors.append(monitor_ins)
@@ -62,19 +62,19 @@ class MultiMonitor(basemonitor.BaseMonitor):
outage_time = (
last_outage - first_outage if last_outage > first_outage else 0
)
+ self._result = {"outage_time": outage_time}
LOG.debug("outage_time is: %f", outage_time)
max_outage_time = 0
- if "max_outage_time" in self._config["sla"]:
- max_outage_time = self._config["sla"]["max_outage_time"]
- elif "max_recover_time" in self._config["sla"]:
- max_outage_time = self._config["sla"]["max_recover_time"]
- else:
- raise RuntimeError("monitor max_outage_time config is not found")
- self._result = {"outage_time": outage_time}
-
- if outage_time > max_outage_time:
- LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
- return False
- else:
- return True
+ if self._config.get("sla"):
+ if "max_outage_time" in self._config["sla"]:
+ max_outage_time = self._config["sla"]["max_outage_time"]
+ elif "max_recover_time" in self._config["sla"]:
+ max_outage_time = self._config["sla"]["max_recover_time"]
+ else:
+ raise RuntimeError("'max_outage_time' or 'max_recover_time' "
+ "config is not found")
+ if outage_time > max_outage_time:
+ LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
+ return False
+ return True
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index b0f6f8e9d..280e5811d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -25,14 +25,14 @@ class MonitorProcess(basemonitor.BaseMonitor):
self.connection = ssh.SSH.from_node(host, defaults={"user": "root"})
self.connection.wait(timeout=600)
- LOG.debug("ssh host success!")
+ LOG.debug("ssh host (%s) success!", str(host))
self.check_script = self.get_script_fullpath(
"ha_tools/check_process_python.bash")
self.process_name = self._config["process_name"]
def monitor_func(self):
with open(self.check_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ _, stdout, _ = self.connection.execute(
"sudo /bin/sh -s {0}".format(self.process_name),
stdin=stdin_file)
@@ -45,15 +45,13 @@ class MonitorProcess(basemonitor.BaseMonitor):
return True
def verify_SLA(self):
- LOG.debug("the _result:%s", self._result)
outage_time = self._result.get('outage_time', None)
- max_outage_time = self._config["sla"]["max_recover_time"]
- if outage_time > max_outage_time:
- LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
- return False
- else:
- LOG.info("the sla is passed")
- return True
+ if self._config.get("sla"):
+ max_outage_time = self._config["sla"]["max_recover_time"]
+ if outage_time > max_outage_time:
+ LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
+ return False
+ return True
def _test(): # pragma: no cover
@@ -73,7 +71,7 @@ def _test(): # pragma: no cover
}
monitor_configs.append(config)
- p = basemonitor.MonitorMgr()
+ p = basemonitor.MonitorMgr({})
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
diff --git a/yardstick/benchmark/scenarios/availability/operation_conf.yaml b/yardstick/benchmark/scenarios/availability/operation_conf.yaml
index dc5169196..5f3f6c91e 100644
--- a/yardstick/benchmark/scenarios/availability/operation_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/operation_conf.yaml
@@ -35,3 +35,14 @@ get-vip-host:
action_script: ha_tools/pacemaker/get_vip_host.bash
rollback_script: ha_tools/pacemaker/get_resource_status.bash
+start-service:
+ action_script: ha_tools/start_service.bash
+ rollback_script: ha_tools/check_process_python.bash
+
+add-server-to-secgroup:
+ action_script: ha_tools/nova/add_server_to_existing_secgroup.bash
+ rollback_script: ha_tools/nova/remove_server_from_secgroup.bash
+
+get-privateip:
+ action_script: ha_tools/nova/get_server_privateip.bash
+ rollback_script: ha_tools/nova/list_servers.bash
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 9ac55471d..e2db03a70 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -26,7 +26,6 @@ class ScenarioGeneral(base.Scenario):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.intermediate_variables = {}
- self.pass_flag = True
def setup(self):
self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -47,7 +46,7 @@ class ScenarioGeneral(base.Scenario):
step['actionType'], step['actionKey'])
if actionRollbacker:
self.director.executionSteps.append(actionRollbacker)
- except Exception:
+ except Exception: # pylint: disable=broad-except
LOG.exception("Exception")
LOG.debug(
"\033[91m exception when running step: %s .... \033[0m",
@@ -59,31 +58,20 @@ class ScenarioGeneral(base.Scenario):
self.director.stopMonitors()
verify_result = self.director.verify()
-
- self.director.store_result(result)
-
+ service_not_found = False
for k, v in self.director.data.items():
if v == 0:
- result['sla_pass'] = 0
verify_result = False
- self.pass_flag = False
- LOG.info(
- "\033[92m The service process not found in the host \
-envrioment, the HA test case NOT pass")
+ service_not_found = True
+ LOG.info("\033[92m The service process (%s) not found in the host environment", k)
- if verify_result:
- result['sla_pass'] = 1
- LOG.info(
- "\033[92m Congratulations, "
- "the HA test case PASS! \033[0m")
- else:
- result['sla_pass'] = 0
- self.pass_flag = False
- LOG.info(
- "\033[91m Aoh, the HA test case FAIL,"
- "please check the detail debug information! \033[0m")
+ result['sla_pass'] = 1 if verify_result else 0
+ self.director.store_result(result)
+
+ self.verify_SLA(
+ verify_result, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "Director.verify() failed"))
def teardown(self):
self.director.knockoff()
-
- assert self.pass_flag, "The HA test case NOT passed"
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 6d0d812af..fdfe7cbbe 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -29,13 +29,13 @@ class ServiceHA(base.Scenario):
self.context_cfg = context_cfg
self.setup_done = False
self.data = {}
- self.pass_flag = True
+ self.sla_pass = False
def setup(self):
"""scenario setup"""
nodes = self.context_cfg.get("nodes", None)
if nodes is None:
- LOG.error("the nodes info is none")
+ LOG.error("The nodes info is none")
return
self.attackers = []
@@ -58,43 +58,40 @@ class ServiceHA(base.Scenario):
def run(self, result):
"""execute the benchmark"""
if not self.setup_done:
- LOG.error("The setup not finished!")
+ LOG.error("The setup is not finished!")
return
self.monitorMgr.start_monitors()
- LOG.info("HA monitor start!")
+ LOG.info("Monitor '%s' start!", self.__scenario_type__)
for attacker in self.attackers:
attacker.inject_fault()
self.monitorMgr.wait_monitors()
- LOG.info("HA monitor stop!")
+ LOG.info("Monitor '%s' stop!", self.__scenario_type__)
- sla_pass = self.monitorMgr.verify_SLA()
+ self.sla_pass = self.monitorMgr.verify_SLA()
+ service_not_found = False
for k, v in self.data.items():
if v == 0:
- result['sla_pass'] = 0
- self.pass_flag = False
- LOG.info("The service process not found in the host envrioment, \
-the HA test case NOT pass")
- return
+ self.sla_pass = False
+ service_not_found = True
+ LOG.info("The service process (%s) not found in the host envrioment", k)
+
+ result['sla_pass'] = 1 if self.sla_pass else 0
self.monitorMgr.store_result(result)
- if sla_pass:
- result['sla_pass'] = 1
- LOG.info("The HA test case PASS the SLA")
- else:
- result['sla_pass'] = 0
- self.pass_flag = False
- assert sla_pass is True, "The HA test case NOT pass the SLA"
- return
+ self.verify_SLA(
+ self.sla_pass, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "MonitorMgr.verify_SLA() failed"))
def teardown(self):
"""scenario teardown"""
+ # recover when the attack is mandatory or the SLA did not pass
for attacker in self.attackers:
- attacker.recover()
-
- assert self.pass_flag, "The HA test case NOT passed"
+ if attacker.mandatory or not self.sla_pass:
+ attacker.recover()
def _test(): # pragma: no cover