path: root/dovetail/test_runner.py
author    Leo Wang <grakiss.wanglei@huawei.com>    2016-12-21 02:34:44 -0500
committer Leo wang <grakiss.wanglei@huawei.com>    2016-12-22 02:14:26 +0000
commit    76be7f7c6b2921aad6a68504a2020fb032eb5fde (patch)
tree      d46b86eff31cd11fc50b6cdc9f45965e14a919b8 /dovetail/test_runner.py
parent    1124a453feb24308f58bda363c229f632cafd82f (diff)
[dovetail tool] check and get results for each cmd
JIRA: DOVETAIL-166

Check the results of each cmd executed in a test case:
1. The results of pre_condition, post_condition, and cmds need to be checked, so a failure is caught quickly instead of going through the next steps.
2. It is more accurate to show where the error occurred as early as possible.
3. Get results from shell scripts.

Change-Id: I5c1e59839c55b92de0e83e7e1eb552aa364b3f80
Signed-off-by: Leo Wang <grakiss.wanglei@huawei.com>
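To illustrate the pattern this commit introduces, the sketch below runs a list of commands, records (cmd, ret, msg) for each, stops the pre-condition phase at the first failure, and saves the results as JSON. It is a loose standalone stand-in, not dovetail code: run_cmd substitutes for Container.exec_cmd / dt_utils.exec_cmd, and the prepare_cmd and test case state handling seen in the diff below is omitted.

    import json
    import subprocess


    def run_cmd(cmd):
        """Run a shell command and return (ret, msg), mirroring exec_cmd in the diff."""
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _ = proc.communicate()
        return proc.returncode, out.decode(errors='replace').strip()


    def run_with_checks(pre_cmds, cmds, post_cmds, result_file):
        result = {'pass': True, 'results': []}

        # Pre-condition commands: stop at the first failure for a quick fail.
        for cmd in pre_cmds:
            ret, msg = run_cmd(cmd)
            result['results'].append((cmd, ret, msg))
            if ret != 0:
                result['pass'] = False
                break
        else:
            # Main commands: record every result; any failure marks the case failed.
            for cmd in cmds:
                ret, msg = run_cmd(cmd)
                result['results'].append((cmd, ret, msg))
                if ret != 0:
                    result['pass'] = False

        # Post-condition commands always run for cleanup; results are still recorded.
        for cmd in post_cmds:
            ret, msg = run_cmd(cmd)
            result['results'].append((cmd, ret, msg))

        with open(result_file, 'w') as f:
            f.write(json.dumps(result))
        return result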
Diffstat (limited to 'dovetail/test_runner.py')
-rw-r--r--    dovetail/test_runner.py    78
1 file changed, 67 insertions(+), 11 deletions(-)
diff --git a/dovetail/test_runner.py b/dovetail/test_runner.py
index 8a95b1f7..70dd2345 100644
--- a/dovetail/test_runner.py
+++ b/dovetail/test_runner.py
@@ -7,8 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+import os
+import json
import utils.dovetail_utils as dt_utils
import utils.dovetail_logger as dt_logger
+from utils.dovetail_config import DovetailConfig as dt_cfg
from container import Container
@@ -19,34 +22,48 @@ class DockerRunner(object):
def __init__(self, testcase):
self.testcase = testcase
+ self.logger.debug('create runner: %s', self.type)
@classmethod
def create_log(cls):
- cls.logger = dt_logger.Logger(__file__).getLogger()
+ cls.logger = dt_logger.Logger(__name__ + '.DockerRunner').getLogger()
def run(self):
Container.pull_image(self.testcase.validate_type())
container_id = Container.create(self.testcase.validate_type())
+ if container_id == '':
+ self.logger.error('failed to create container')
+ return
+
self.logger.debug('container id:%s' % container_id)
if not self.testcase.prepared():
+ failed = False
cmds = self.testcase.pre_condition()
if cmds:
for cmd in cmds:
- Container.exec_cmd(container_id, cmd)
- self.testcase.prepared(True)
+ ret, msg = Container.exec_cmd(container_id, cmd)
+ if ret != 0:
+ failed = True
+ break
+ if not failed:
+ self.testcase.prepared(True)
if not self.testcase.prepare_cmd():
self.logger.error('failed to prepare testcase:%s',
self.testcase.name)
else:
for cmd in self.testcase.cmds:
- Container.exec_cmd(container_id, cmd)
+ ret, msg = Container.exec_cmd(container_id, cmd)
+ if ret != 0:
+ self.logger.error('Failed to exec %s, ret:%d, msg:%s',
+ cmd, ret, msg)
+ break
cmds = self.testcase.post_condition()
if cmds:
for cmd in cmds:
- Container.exec_cmd(container_id, cmd)
+ ret, msg = Container.exec_cmd(container_id, cmd)
self.testcase.cleaned(True)
Container.clean(container_id)
@@ -55,15 +72,15 @@ class DockerRunner(object):
class FunctestRunner(DockerRunner):
def __init__(self, testcase):
+ self.type = 'functest'
super(FunctestRunner, self).__init__(testcase)
- self.name = 'functest'
class YardstickRunner(DockerRunner):
def __init__(self, testcase):
+ self.type = 'yardstick'
super(YardstickRunner, self).__init__(testcase)
- self.name = 'yardstick'
class ShellRunner(object):
@@ -72,16 +89,55 @@ class ShellRunner(object):
@classmethod
def create_log(cls):
- cls.logger = dt_logger.Logger(__file__).getLogger()
+ cls.logger = dt_logger.Logger(__name__ + '.ShellRunner').getLogger()
def __init__(self, testcase):
super(ShellRunner, self).__init__()
self.testcase = testcase
- self.name = 'shell'
+ self.type = 'shell'
+ self.logger.debug('create runner:%s', self.type)
def run(self):
- for cmd in self.testcase.cmds:
- dt_utils.exec_cmd(cmd, self.logger)
+ passed = True
+ failed = False
+ result = {'pass': True, 'results': []}
+ if not self.testcase.prepared():
+ cmds = self.testcase.pre_condition()
+ for cmd in cmds:
+ ret, msg = dt_utils.exec_cmd(cmd, self.logger)
+ result['results'].append((cmd, ret, msg))
+ if ret != 0:
+ failed = True
+ break
+ if not failed:
+ self.testcase.prepared(True)
+
+ if not self.testcase.prepare_cmd():
+ self.logger.error('failed to prepare cmd:%s',
+ self.testcase.name())
+ else:
+ for cmd in self.testcase.cmds:
+ ret, msg = dt_utils.exec_cmd(cmd, self.logger)
+ result['results'].append((cmd, ret, msg))
+ if ret != 0:
+ passed = False
+
+ result['pass'] = passed
+
+ cmds = self.testcase.post_condition()
+ for cmd in cmds:
+ ret, msg = dt_utils.exec_cmd(cmd, self.logger)
+ result['results'].append((cmd, ret, msg))
+
+ result_filename = os.path.join(dt_cfg.dovetail_config['result_dir'],
+ self.testcase.name()) + '.out'
+ self.logger.debug('save result:%s', result_filename)
+ try:
+ with open(result_filename, 'w') as f:
+ f.write(json.dumps(result))
+ except Exception as e:
+ self.logger.exception('Failed to write result into file:%s, \
+ except:%s', result_filename, e)
class TestRunnerFactory(object):
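Side note on the saved results: ShellRunner.run() above serializes the result dict ({'pass': ..., 'results': [(cmd, ret, msg), ...]}) as JSON into <result_dir>/<testcase name>.out, so downstream tooling can read it back directly. A minimal sketch, assuming a hypothetical result file path (the real location comes from dt_cfg.dovetail_config['result_dir'] and the test case name):

    import json

    # Hypothetical path; the actual file is <result_dir>/<testcase name>.out.
    result_file = 'results/example_testcase.out'

    with open(result_file) as f:
        result = json.load(f)

    print('passed:', result['pass'])
    for cmd, ret, msg in result['results']:
        print('%s -> ret=%d, msg=%s' % (cmd, ret, msg))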