Diffstat (limited to 'tests')
 -rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml         |  87
 -rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml         |  43
 -rw-r--r--  tests/opnfv/test_suites/fuel_test_suite.yaml              |  12
 -rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vsperf.py  | 132
 4 files changed, 274 insertions(+), 0 deletions(-)
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml
new file mode 100644
index 000000000..d7406832d
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml
@@ -0,0 +1,87 @@
+---
+# Yardstick TC044 config file
+# Measure memory usage statistics, network throughput, latency and packet loss.
+# Different numbers of flows are tested, from 2 up to 1001000.
+# All tests are run 2 times each. First 2 times with the smallest
+# number of ports, then 2 times with the next number of ports,
+# and so on until all port counts have been run with.
+#
+# During the measurements, memory usage statistics and network latency are
+# recorded/measured using free and ping, respectively.
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: MEMORYload
+ run_in_background: true
+
+ options:
+ interval: 1
+ count: 1
+
+ host: demeter.yardstick-TC044
+-
+ type: MEMORYload
+ run_in_background: true
+
+ options:
+ interval: 1
+ count: 1
+
+ host: poseidon.yardstick-TC044
+-
+ type: Ping
+ run_in_background: true
+
+ options:
+ packetsize: 100
+
+ host: demeter.yardstick-TC044
+ target: poseidon.yardstick-TC044
+
+ sla:
+ max_rtt: 10
+ action: monitor
+{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %}
+-
+ type: Pktgen
+ options:
+ packetsize: 64
+ number_of_ports: {{num_ports}}
+ duration: 20
+
+ host: demeter.yardstick-TC044
+ target: poseidon.yardstick-TC044
+
+ runner:
+ type: Iteration
+ iterations: 2
+ interval: 1
+
+ sla:
+ max_ppm: 1000
+ action: monitor
+{% endfor %}
+
+context:
+ name: yardstick-TC044
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ubuntu
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ demeter:
+ floating_ip: true
+ placement: "pgrp1"
+ poseidon:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
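(For reference, each pass of the Jinja2 loop in opnfv_yardstick_tc044.yaml above expands to one Pktgen scenario; the final iteration, num_ports = 1000, renders to the snippet below. This is only a rendered view of the template above, not an additional file in this change.)

-
  type: Pktgen
  options:
    packetsize: 64
    number_of_ports: 1000
    duration: 20

  host: demeter.yardstick-TC044
  target: poseidon.yardstick-TC044

  runner:
    type: Iteration
    iterations: 2
    interval: 1

  sla:
    max_ppm: 1000
    action: monitor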
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
new file mode 100644
index 000000000..812d53dd8
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
@@ -0,0 +1,43 @@
+---
+# Test case for TC045: Control node OpenStack service down - neutron server
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "kill-process"
+ process_name: "neutron-server"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "neutron agent-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "process"
+ process_name: "neutron-server"
+ host: node1
+ monitor_time: 10
+ sla:
+ max_recover_time: 5
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
+
diff --git a/tests/opnfv/test_suites/fuel_test_suite.yaml b/tests/opnfv/test_suites/fuel_test_suite.yaml
new file mode 100644
index 000000000..016bf0953
--- /dev/null
+++ b/tests/opnfv/test_suites/fuel_test_suite.yaml
@@ -0,0 +1,12 @@
+---
+# Fuel integration test task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "fuel_test_suite"
+test_cases_dir: "samples/"
+test_cases:
+-
+ file_name: ping.yaml
+-
+ file_name: iperf3.yaml
diff --git a/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/tests/unit/benchmark/scenarios/networking/test_vsperf.py
new file mode 100644
index 000000000..cb5c09ab3
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Unittest for yardstick.benchmark.scenarios.networking.vsperf.Vsperf
+
+import mock
+import unittest
+import os
+import subprocess
+
+from yardstick.benchmark.scenarios.networking import vsperf
+
+
+@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.subprocess')
+@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.ssh')
+@mock.patch("__builtin__.open", return_value=None)
+class VsperfTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ "host": {
+ "ip": "10.229.47.137",
+ "user": "ubuntu",
+ "password": "ubuntu",
+ },
+ }
+ self.args = {
+ 'options': {
+ 'testname': 'rfc2544_p2p_continuous',
+ 'traffic_type': 'continuous',
+ 'pkt_sizes': '64',
+ 'bidirectional': 'True',
+ 'iload': 100,
+ 'duration': 29,
+ 'trafficgen_port1': 'eth1',
+ 'trafficgen_port2': 'eth3',
+ 'external_bridge': 'br-ex',
+ 'conf-file': 'vsperf-yardstick.conf',
+ 'setup-script': 'setup_yardstick.sh',
+ },
+ 'sla': {
+ 'metrics': 'throughput_rx_fps',
+ 'throughput_rx_fps': 500000,
+ 'action': 'monitor',
+ }
+ }
+
+ def test_vsperf_setup(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ p.setup()
+ self.assertIsNotNone(p.client)
+ self.assertEqual(p.setup_done, True)
+
+ def test_vsperf_teardown(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+
+ # setup() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ p.setup()
+ self.assertIsNotNone(p.client)
+ self.assertEqual(p.setup_done, True)
+
+ p.teardown()
+ self.assertEqual(p.setup_done, False)
+
+ def test_vsperf_run_ok(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+
+ # setup() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ # run() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_ssh.SSH().execute.return_value = (0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+
+ result = {}
+ p.run(result)
+
+ self.assertEqual(result['throughput_rx_fps'], '14797660.000')
+
+    def test_vsperf_run_failed_vsperf_execution(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+
+ # setup() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ # run() specific mocks
+ mock_ssh.SSH().execute.return_value = (1, '', '')
+
+ result = {}
+ self.assertRaises(RuntimeError, p.run, result)
+
+    def test_vsperf_run_failed_csv_report(self, mock_open, mock_ssh, mock_subprocess):
+ p = vsperf.Vsperf(self.args, self.ctx)
+
+ # setup() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_subprocess.call().execute.return_value = None
+
+ # run() specific mocks
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ mock_ssh.SSH().execute.return_value = (1, '', '')
+
+ result = {}
+ self.assertRaises(RuntimeError, p.run, result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()