path: root/qtip/ansible_library/plugins/action/calculate.py
#!/usr/bin/python

##############################################################################
# Copyright (c) 2017 ZTE Corporation
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import json
import numbers

from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from asq.initiators import query
import humanfriendly
from numpy import mean
import yaml

from qtip.util.export_to import export_to_file

display = Display()


class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):

        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        if result.get('skipped', False):
            return result

        # load the QPI specification (YAML)
        with open(self._task.args.get('spec')) as stream:
            spec = yaml.safe_load(stream)

        # load collected metrics, one JSON file per metric name
        metrics_files = self._task.args.get('metrics')
        metrics = {}
        for metric, filename in metrics_files.items():
            with open(filename) as f:
                metrics[metric] = json.load(f)

        # pick the system information fields listed in the spec
        with open(self._task.args.get('sysinfo')) as f:
            data = json.load(f)
            sysinfo = dict([(k['name'], data[k['name']][0]) for k in spec['system_info']])

        dest = self._task.args.get('dest')

        # the baseline is optional; when given, scores are normalized against it
        baseline_file = self._task.args.get('baseline')
        if baseline_file is not None:
            with open(baseline_file) as f:
                baseline = json.load(f)
                return calc_qpi(spec, metrics, sysinfo, baseline, dest=dest)
        else:
            return calc_qpi(spec, metrics, sysinfo, None, dest=dest)
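
# Illustrative only: a minimal playbook task that could drive this action
# plugin, based on the arguments read in run() above. The task name, file
# names and metric keys are assumptions, not taken from the QTIP repository.
#
#   - name: calculate QPI
#     calculate:
#       spec: resources/QPI/compute.yaml
#       metrics:
#         dhrystone: dhrystone.json
#         whetstone: whetstone.json
#       sysinfo: sysinfo.json
#       baseline: resources/baselines/compute.json
#       dest: qpi-result.json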


@export_to_file
def calc_qpi(qpi_spec, metrics, sysinfo, qpi_baseline):
    """Calculate the overall QPI score, optionally against a baseline."""
    display.vv("calculate QPI {}".format(qpi_spec['name']))
    display.vvv("spec: {}".format(qpi_spec))
    display.vvv("metrics: {}".format(metrics))
    display.vvv("baseline: {}".format(qpi_baseline))

    section_results = []
    qpi_score = 0
    if qpi_baseline:
        for s in qpi_spec['sections']:
            s_baseline = query(qpi_baseline['sections']).first(
                lambda section: section['name'] == s['name'])
            section_results.append(calc_section(s, metrics, s_baseline))

        # TODO(yujunz): use formula in spec
        qpi_score = int(
            mean([r['score'] for r in section_results]) * qpi_baseline['score'])
    else:
        for s in qpi_spec['sections']:
            section_results.append(calc_section(s, metrics))

    results = {
        'score': qpi_score,
        'name': qpi_spec['name'],
        'description': qpi_spec['description'],
        'system_info': sysinfo,
        'sections': section_results,
        'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
        'baseline': "https://git.opnfv.org/qtip/tree/resources/baselines/compute.json"
    }

    return results
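
# A rough sketch of the input shapes that calc_qpi and the helpers below
# expect, inferred from the lookups they perform; field values here are
# illustrative assumptions, the authoritative schema is in the spec and
# baseline files referenced above.
#
#   spec:     {'name': ..., 'description': ..., 'system_info': [{'name': ...}],
#              'sections': [{'name': ..., 'metrics': [{'name': ...,
#                            'workloads': [{'name': ...}]}]}]}
#   baseline: {'score': <int>, 'sections': [{'name': ..., 'metrics':
#              [{'name': ..., 'workloads': [{'name': ..., 'baseline': ...}]}]}]}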


def calc_section(section_spec, metrics, section_baseline=None):
    """Calculate the score of one section by averaging its metric scores."""
    display.vv("calculate section {}".format(section_spec['name']))
    display.vvv("spec: {}".format(section_spec))
    display.vvv("metrics: {}".format(metrics))
    display.vvv("baseline: {}".format(section_baseline))

    metric_results = []
    section_score = 0
    if section_baseline:
        for m in section_spec['metrics']:
            m_baseline = query(section_baseline['metrics']).first(
                lambda metric: metric['name'] == m['name'])
            metric_results.append(calc_metric(m, metrics[m['name']], m_baseline))
        section_score = mean([r['score'] for r in metric_results])
    else:
        for m in section_spec['metrics']:
            metric_results.append(calc_metric(m, metrics[m['name']]))

    # TODO(yujunz): use formula in spec
    return {
        'score': section_score,
        'name': section_spec['name'],
        'description': section_spec.get('description', 'section'),
        'metrics': metric_results
    }


def calc_metric(metric_spec, metrics, metric_baseline=None):
    """Calculate the score of one metric by averaging its workload scores."""
    display.vv("calculate metric {}".format(metric_spec['name']))
    display.vvv("spec: {}".format(metric_spec))
    display.vvv("metrics: {}".format(metrics))
    display.vvv("baseline: {}".format(metric_baseline))

    # TODO(yujunz): use formula in spec
    workload_results = []
    metric_score = 0
    if metric_baseline:
        for w in metric_spec['workloads']:
            w_baseline = query(metric_baseline['workloads']).first(
                lambda workload: workload['name'] == w['name'])
            workload_results.append({
                'name': w['name'],
                'description': 'workload',
                'score': calc_score(metrics[w['name']], w_baseline['baseline']),
                'result': metrics[w['name']][0]
                })
        metric_score = mean([r['score'] for r in workload_results])
    else:
        for w in metric_spec['workloads']:
            workload_results.append({
                'name': w['name'],
                'baseline': metrics[w['name']][0]
            })

    return {
        'score': metric_score,
        'name': metric_spec['name'],
        'description': metric_spec.get('description', 'metric'),
        'workloads': workload_results
    }
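
# Note: when no baseline is supplied, calc_metric records the measured value
# under 'baseline' for each workload; presumably this lets the produced
# result be reused as a baseline file for later runs.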


def calc_score(metrics, baseline):
    """Average the measured values and normalize them against the baseline.

    Values given as human-friendly size strings (e.g. '2 KB') are parsed to
    plain numbers before averaging.
    """
    if not isinstance(baseline, numbers.Number):
        baseline = humanfriendly.parse_size(baseline)

    return mean(
        [m if isinstance(m, numbers.Number) else humanfriendly.parse_size(m)
         for m in metrics]) / baseline
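
# Worked example (assumed values; humanfriendly.parse_size uses decimal units
# by default): calc_score(['2 KB', 1000], '1 KB') parses '2 KB' to 2000,
# averages [2000, 1000] to 1500.0, and divides by the parsed baseline 1000,
# giving a score of 1.5.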