path: root/yardstick/main.py

#!/usr/bin/env python

##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

""" yardstick - command line tool for managing benchmarks

    Example invocation:
    $ yardstick samples/ping-task.yaml
"""

import sys
import yaml
import atexit
import pkg_resources

from yardstick.benchmark.context.model import Context
from yardstick.benchmark.runners import base as base_runner
from yardstick.cmdparser import CmdParser
from yardstick.orchestrator.heat import HeatStack


class TaskParser(object):
    '''Parser for task config files in yaml format'''
    def __init__(self, path):
        self.path = path

    def parse(self):
        '''parse the task file and return the context and scenario instances'''
        print "Parsing task config:", self.path
        try:
            with open(self.path) as stream:
                cfg = yaml.load(stream)
        except IOError as ioerror:
            sys.exit(ioerror)

        if cfg["schema"] != "yardstick:task:0.1":
            sys.exit("error: file %s has unknown schema %s" % (self.path,
                                                               cfg["schema"]))
        context = Context()
        context.init(cfg["context"])

        run_in_parallel = cfg.get("run_in_parallel", False)

        # TODO we need something better here, a class that represents the file
        return cfg["scenarios"], run_in_parallel, context


def atexit_handler():
    '''handler for process termination'''
    if HeatStack.stacks_exist():
        print "Deleting all stacks"
        HeatStack.delete_all()


def run_one_scenario(scenario_cfg, context, output_file):
    '''run one scenario using context'''
    key_filename = pkg_resources.resource_filename(
        'yardstick.resources', 'files/yardstick_key')

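    # Build the runner configuration: the runner reaches the host server over
    # its floating IP, logging in as the context user with the SSH key bundled
    # in yardstick.resources, and writes its results to output_filename.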
    host = context.get_server(scenario_cfg["host"])

    runner_cfg = scenario_cfg["runner"]
    runner_cfg['host'] = host.floating_ip["ipaddr"]
    runner_cfg['user'] = context.user
    runner_cfg['key_filename'] = key_filename
    runner_cfg['output_filename'] = output_file

    target = context.get_server(scenario_cfg["target"])
    if target.floating_ip:
        runner_cfg['target'] = target.floating_ip["ipaddr"]

    # TODO hardcoded name below, a server can be attached to several nets
    scenario_cfg["ipaddr"] = target.ports["test"]["ipaddr"]

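    # Instantiate a runner of the configured type; the concrete runner classes
    # live under yardstick.benchmark.runners and are looked up by Runner.get().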
    runner = base_runner.Runner.get(runner_cfg)

    print "Starting runner of type '%s'" % runner_cfg["type"]
    runner.run(scenario_cfg["type"], scenario_cfg)

    return runner


def main():
    '''yardstick main'''

    atexit.register(atexit_handler)

    prog_args = CmdParser().parse_args()

    parser = TaskParser(prog_args.taskfile[0])
    scenarios, run_in_parallel, context = parser.parse()

    if prog_args.parse_only:
        sys.exit(0)

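    # Deploy the context (backed by a Heat stack, see context.stack below).
    # It is torn down by context.undeploy() at the end of the run or, on
    # abnormal exit, by the atexit handler, unless keep_deploy is set.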
    context.deploy()

    runners = []
    if run_in_parallel:
        for scenario in scenarios:
            runner = run_one_scenario(scenario, context, prog_args.output_file)
            runners.append(runner)

        # Wait for runners to finish
        for runner in runners:
            runner.join()
            print "Runner ended, output in", prog_args.output_file
            base_runner.Runner.release(runner)
    else:
        # run serially
        for scenario in scenarios:
            runner = run_one_scenario(scenario, context, prog_args.output_file)
            runner.join()
            print "Runner ended, output in", prog_args.output_file
            base_runner.Runner.release(runner)

    if prog_args.keep_deploy:
        # keep deployment, forget about stack (hide it for exit handler)
        context.stack = None
    else:
        context.undeploy()

    print "Done, exiting"

if __name__ == '__main__':
    main()