summaryrefslogtreecommitdiffstats
path: root/commons/traffic-profile-guidelines.rst
blob: 0b965b1568619f359eff0b9bbf83ffaa960b0fe2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
================================
OPNFV traffic profile guidelines
================================

.. contents::

.. _introduction:

------------
Introduction
------------

In order to have consistent testing profiles, it has been suggested to define and store traffic profiles.
These profiles shall be based on representative operator scenarios.

These reference profiles may be used by any test project, whether for unit, functional or performance tests.
It is possible to adapt them to specific test cases.
It is recommended to use them in order to avoid ending up with as many profiles as there are tests.
Shared profiles also make it easier to compare the results of different test scenarios.

.. _howto:

-------------------------
How to use these profiles
-------------------------

The directory layout of the traffic profiles may be described as follows::

 ├── commons
   ├── ims
   │   └── readme.rst
   ├── mobile
   │   └── readme.rst
   └── traffic-profile-guidelines.rst

The readme.rst file in each subdirectory details the corresponding profile.


.. _overview:

------------------------
Traffic profile overview
------------------------

The following profiles are currently available:
 * Mobile traffic
 * IMS residential traffic
 * ...

Mobile traffic
==============

IMS residential traffic
=======================





.. _reference:

---------
Reference
---------
#!/usr/bin/env python

##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench

from __future__ import absolute_import

import unittest

import mock
from oslo_serialization import jsonutils

from yardstick.benchmark.scenarios.compute import lmbench


@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
class LmbenchTestCase(unittest.TestCase):
    """Unit tests for yardstick.benchmark.scenarios.compute.lmbench.Lmbench.

    The ``ssh`` module used by the scenario is patched at class level, so
    every test method receives the patched module as its ``mock_ssh``
    argument and no real SSH connection is ever opened.
    """

    def setUp(self):
        """Build a minimal scenario context: one host reachable over SSH."""
        self.ctx = {
            'host': {
                'ip': '172.16.0.137',
                'user': 'cirros',
                'key_filename': "mykey.key"
            }
        }

        # Scenario output is accumulated into this dict by run().
        self.result = {}

    def test_successful_setup(self, mock_ssh):
        """setup() opens an SSH client and flags the scenario as ready."""
        scenario = lmbench.Lmbench({}, self.ctx)
        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')

        scenario.setup()
        self.assertIsNotNone(scenario.client)
        self.assertTrue(scenario.setup_done)

    def test_unsuccessful_unknown_type_run(self, mock_ssh):
        """run() raises RuntimeError for an unrecognized test_type."""
        options = {
            "test_type": "foo"
        }
        args = {'options': options}

        scenario = lmbench.Lmbench(args, self.ctx)

        self.assertRaises(RuntimeError, scenario.run, self.result)

    def test_successful_latency_run_no_sla(self, mock_ssh):
        """Latency results are flattened into 'latenciesN.*' keys."""
        options = {
            "test_type": "latency",
            "stride": 64,
            "stop_size": 16
        }
        args = {'options': options}
        scenario = lmbench.Lmbench(args, self.ctx)

        sample_output = '[{"latency": 4.944, "size": 0.00049}]'
        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        scenario.run(self.result)
        expected_result = {"latencies0.latency": 4.944,
                           "latencies0.size": 0.00049}
        self.assertEqual(self.result, expected_result)

    def test_successful_bandwidth_run_no_sla(self, mock_ssh):
        """Bandwidth results are stored verbatim from the JSON output."""
        options = {
            "test_type": "bandwidth",
            "size": 500,
            "benchmark": "rd",
            "warmup": 0
        }
        args = {"options": options}
        scenario = lmbench.Lmbench(args, self.ctx)

        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        scenario.run(self.result)
        expected_result = jsonutils.loads(sample_output)
        self.assertEqual(self.result, expected_result)

    def test_successful_latency_run_sla(self, mock_ssh):
        """A latency below max_latency satisfies the SLA."""
        options = {
            "test_type": "latency",
            "stride": 64,
            "stop_size": 16
        }
        args = {
            "options": options,
            "sla": {"max_latency": 35}
        }
        scenario = lmbench.Lmbench(args, self.ctx)

        sample_output = '[{"latency": 4.944, "size": 0.00049}]'
        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        scenario.run(self.result)
        expected_result = {"latencies0.latency": 4.944,
                           "latencies0.size": 0.00049}
        self.assertEqual(self.result, expected_result)

    def test_successful_bandwidth_run_sla(self, mock_ssh):
        """A bandwidth above min_bandwidth satisfies the SLA."""
        options = {
            "test_type": "bandwidth",
            "size": 500,
            "benchmark": "rd",
            "warmup": 0
        }
        args = {
            "options": options,
            "sla": {"min_bandwidth": 10000}
        }
        scenario = lmbench.Lmbench(args, self.ctx)

        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        scenario.run(self.result)
        expected_result = jsonutils.loads(sample_output)
        self.assertEqual(self.result, expected_result)

    def test_unsuccessful_latency_run_sla(self, mock_ssh):
        """A latency above max_latency violates the SLA (AssertionError)."""
        options = {
            "test_type": "latency",
            "stride": 64,
            "stop_size": 16
        }
        args = {
            "options": options,
            "sla": {"max_latency": 35}
        }
        scenario = lmbench.Lmbench(args, self.ctx)

        sample_output = '[{"latency": 37.5, "size": 0.00049}]'
        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        self.assertRaises(AssertionError, scenario.run, self.result)

    def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
        """A bandwidth below min_bandwidth violates the SLA (AssertionError)."""
        options = {
            "test_type": "bandwidth",
            "size": 500,
            "benchmark": "rd",
            "warmup": 0
        }
        args = {
            "options": options,
            "sla": {"min_bandwidth": 10000}
        }
        scenario = lmbench.Lmbench(args, self.ctx)

        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        self.assertRaises(AssertionError, scenario.run, self.result)

    def test_successful_latency_for_cache_run_sla(self, mock_ssh):
        """Cache-latency results are stored verbatim from the JSON output."""
        options = {
            "test_type": "latency_for_cache",
            "repetition": 1,
            "warmup": 0
        }
        args = {
            "options": options,
            "sla": {"max_latency": 35}
        }
        scenario = lmbench.Lmbench(args, self.ctx)

        sample_output = "{\"L1cache\": 1.6}"
        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        scenario.run(self.result)
        expected_result = jsonutils.loads(sample_output)
        self.assertEqual(self.result, expected_result)

    def test_unsuccessful_script_error(self, mock_ssh):
        """A non-zero exit status from the remote script raises RuntimeError."""
        options = {"test_type": "bandwidth"}
        args = {"options": options}
        scenario = lmbench.Lmbench(args, self.ctx)

        mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
        self.assertRaises(RuntimeError, scenario.run, self.result)


def main():
    """Entry point: discover and run all tests in this module."""
    unittest.main()

if __name__ == '__main__':
    main()