path: root/testsuites/posca/testcase_script/posca_feature_testpmd_scale_up.py
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
'''This file implements the testpmd scale-up feature test.
It contains two parts: the first runs the test script through
yardstick, and the second is the algorithm that judges where the
bottleneck lies. Yardstick is used as the tool to drive the test.'''

import os
import sys
import time
import uuid
import json
import utils.logger as log
from utils.parser import Parser as conf_parser
import utils.env_prepare.stack_prepare as stack_prepare
import utils.infra_setup.runner.docker_env as docker_env
import utils.infra_setup.runner.yardstick as yardstick_task

# --------------------------------------------------
# logging configuration
# --------------------------------------------------
LOG = log.Logger(__name__).getLogger()

testfile = os.path.basename(__file__)
testcase, file_format = os.path.splitext(testfile)
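# Path of the yardstick sample testcase to run; the variable is named after
# the "cidr" parameter of yardstick_command_parser in the runner utilities.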
cidr = "/home/opnfv/repos/yardstick/samples/pvp_throughput_bottlenecks.yaml"
runner_DEBUG = True


def env_pre(con_dic):
    LOG.info("yardstick environment prepare!")
    stack_prepare._prepare_env_daemon(True)


def config_to_result(test_config, test_result):
    final_data = []
    LOG.debug(test_result)
    out_data = test_result["result"]["testcases"]
    test_data = out_data["pvp_throughput_bottlenecks"]["tc_data"]
    for result in test_data:
        testdata = {}
        testdata["vcpu"] = test_config["vcpu"]
        testdata["memory"] = test_config["memory"]
        testdata["nrFlows"] = result["data"]["nrFlows"]
        testdata["packet_size"] = result["data"]["packet_size"]
        testdata["throughput"] = result["data"]["throughput_rx_mbps"]
        final_data.append(testdata)
    return final_data
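
# config_to_result() above expects the yardstick output to have roughly this
# shape (keys taken from the parsing logic; values are illustrative only):
#
#   {"status": 1,
#    "result": {"testcases": {"pvp_throughput_bottlenecks": {"tc_data": [
#        {"data": {"nrFlows": 2,
#                  "packet_size": 64,
#                  "throughput_rx_mbps": 1000.0}}]}}}}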


def testcase_parser(out_file="yardstick.out", **parameter_info):
    cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
                                                  cidr=cidr,
                                                  outfile=out_file,
                                                  parameter=parameter_info)
    return cmd
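
# Hypothetical usage: testcase_parser(out_file="/tmp/yardstick.out",
# vcpu="2", memory=2048) returns the yardstick CLI string that do_test()
# executes inside the yardstick container.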


def do_test(test_config, Use_Dashboard, context_conf):
    yardstick_container = docker_env.yardstick_info['container']
    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
    cmd = testcase_parser(out_file=out_file, **test_config)
    LOG.debug(cmd)
    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
    LOG.info(stdout)
    loop_value = 0
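    # Poll the yardstick output file every 2 seconds (up to ~2 minutes):
    # status 1 means the run succeeded, status 2 means yardstick failed.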
    while loop_value < 60:
        time.sleep(2)
        loop_value = loop_value + 1
        with open(out_file) as f:
            data = json.load(f)
            if data["status"] == 1:
                LOG.info("yardstick run success")
                break
            elif data["status"] == 2:
                LOG.error("yardstick error exit")
                sys.exit(1)

    save_data = config_to_result(test_config, data)
    if Use_Dashboard:
        print("use dashboard")
    return save_data
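
# A sketch of the test_config structure that run() below expects,
# reconstructed from its parsing logic (all values are illustrative only):
#
#   {"load_manager": {"scenarios": {"cpus": "1,2",
#                                   "mems": "1,2",
#                                   "pkt_size": "64,128",
#                                   "multistream": "1000",
#                                   "search_interval": "10"}},
#    "contexts": {"yardstick_ip": None,
#                 "dashboard": "...",
#                 "dashboard_ip": None}}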


def run(test_config):
    load_config = test_config["load_manager"]
    scenarios_conf = load_config["scenarios"]
    Use_Dashboard = False

    env_pre(None)
    if test_config["contexts"]["yardstick_ip"] is None:
        test_config["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True

    cpus = conf_parser.str_to_list(scenarios_conf["cpus"])
    mems = conf_parser.str_to_list(scenarios_conf["mems"])
    pkt_size = conf_parser.str_to_list(scenarios_conf["pkt_size"])
    multistream = conf_parser.str_to_list(scenarios_conf["multistream"])
    search_interval = scenarios_conf["search_interval"]

    load_config["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"

    if len(cpus) != len(mems):
        LOG.error("the cpus and mems config lists are not the same length!")
        sys.exit(1)

    result = []
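
    # Each (vcpu, memory) pair defines one scale-up step; "mems" values
    # appear to be given in GB and are converted to MB for yardstick.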

    for i in range(0, len(cpus)):
        case_config = {"vcpu": cpus[i],
                       "memory": int(mems[i]) * 1024,
                       "multistreams": multistream,
                       "pktsize": pkt_size,
                       "search_interval": search_interval}

        data_reply = do_test(case_config, Use_Dashboard,
                             test_config["contexts"])
        result.append(data_reply)

    LOG.info("Finished bottlenecks testcase")
    LOG.info("The result data is %s", result)
    return result
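

if __name__ == '__main__':
    # Illustrative standalone invocation with a hypothetical minimal config;
    # in practice the Bottlenecks framework parses the testcase yaml and
    # calls run(test_config) itself.
    example_config = {
        "load_manager": {
            "scenarios": {"cpus": "1,2",
                          "mems": "1,2",
                          "pkt_size": "64",
                          "multistream": "1000",
                          "search_interval": "10"}},
        "contexts": {"yardstick_ip": None}}
    run(example_config)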