Diffstat (limited to 'tests/opnfv/test_cases')
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml   85
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml  139
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml   90
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml   61
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml  113
-rwxr-xr-x  tests/opnfv/test_cases/opnfv_yardstick_tc073.yaml   37
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml   27
7 files changed, 467 insertions, 85 deletions
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml
deleted file mode 100644
index 2d10e4073..000000000
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml
+++ /dev/null
@@ -1,85 +0,0 @@
----
-# Yardstick TC004 config file
-# Measure cache hit/miss ratio and usage, network throughput and latency.
-# Different numbers of flows are tested, from 2 up to 1001000.
-# All tests are run 2 times each: first 2 times with the smallest
-# number of ports, then 2 times with the next port count,
-# and so on until all port counts have been run.
-#
-# During the measurements, the cache hit/miss ratio, cache usage statistics and
-# network latency are recorded using cachestat and ping, respectively.
-
-schema: "yardstick:task:0.1"
-
-scenarios:
--
- type: CACHEstat
- run_in_background: true
-
- options:
- interval: 1
-
- host: demeter.yardstick
--
- type: CACHEstat
- run_in_background: true
-
- options:
- interval: 1
-
- host: poseidon.yardstick
--
- type: Ping
- run_in_background: true
-
- options:
- packetsize: 100
-
- host: demeter.yardstick
- target: poseidon.yardstick
-
- sla:
- max_rtt: 10
- action: monitor
-{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %}
--
- type: Pktgen
- options:
- packetsize: 64
- number_of_ports: {{num_ports}}
- duration: 20
-
- host: demeter.yardstick
- target: poseidon.yardstick
-
- runner:
- type: Iteration
- iterations: 2
- interval: 1
-
- sla:
- max_ppm: 1000
- action: monitor
-{% endfor %}
-
-context:
- name: yardstick
- image: yardstick-trusty-server
- flavor: yardstick-flavor
- user: ubuntu
-
- placement_groups:
- pgrp1:
- policy: "availability"
-
- servers:
- demeter:
- floating_ip: true
- placement: "pgrp1"
- poseidon:
- floating_ip: true
- placement: "pgrp1"
-
- networks:
- test:
- cidr: '10.0.1.0/24'
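
Each pass of the Jinja2 loop in the removed TC004 task expanded into a concrete Pktgen scenario. As a reference for the templating, here is a sketch of the rendered output for num_ports=1000, obtained by pure substitution of the values defined in the deleted file above:

    -
      type: Pktgen
      options:
        packetsize: 64
        number_of_ports: 1000    # substituted from {{num_ports}}
        duration: 20

      host: demeter.yardstick
      target: poseidon.yardstick

      runner:
        type: Iteration
        iterations: 2
        interval: 1

      sla:
        max_ppm: 1000
        action: monitor
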
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml
new file mode 100644
index 000000000..0b21f8861
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml
@@ -0,0 +1,139 @@
+---
+# Test case for TC050: OpenStack Controller Node Network High Availability
+# This test case is written using the new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+ -
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "general-attacker"
+ host: node1
+ key: "close-br-public"
+ attack_key: "close-interface"
+ action_parameter:
+ interface: "br-ex"
+ rollback_parameter:
+ interface: "br-ex"
+
+ -
+ fault_type: "general-attacker"
+ host: node1
+ key: "close-br-mgmt"
+ attack_key: "close-interface"
+ action_parameter:
+ interface: "br-mgmt"
+ rollback_parameter:
+ interface: "br-mgmt"
+
+ -
+ fault_type: "general-attacker"
+ host: node1
+ key: "close-br-storage"
+ attack_key: "close-interface"
+ action_parameter:
+ interface: "br-storage"
+ rollback_parameter:
+ interface: "br-storage"
+
+ -
+ fault_type: "general-attacker"
+ host: node1
+ key: "close-br-private"
+ attack_key: "close-interface"
+ action_parameter:
+ interface: "br-mesh"
+ rollback_parameter:
+ interface: "br-mesh"
+
+ monitors:
+ -
+ monitor_type: "openstack-cmd"
+ key: "nova-image-list"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "neutron-router-list"
+ command_name: "neutron router-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "heat-stack-list"
+ command_name: "heat stack-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "cinder-list"
+ command_name: "cinder list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+
+ steps:
+ -
+ actionKey: "close-br-public"
+ actionType: "attacker"
+ index: 1
+
+ -
+ actionKey: "close-br-mgmt"
+ actionType: "attacker"
+ index: 2
+
+ -
+ actionKey: "close-br-storage"
+ actionType: "attacker"
+ index: 3
+
+ -
+ actionKey: "close-br-private"
+ actionType: "attacker"
+ index: 4
+
+ -
+ actionKey: "nova-image-list"
+ actionType: "monitor"
+ index: 5
+
+ -
+ actionKey: "neutron-router-list"
+ actionType: "monitor"
+ index: 6
+
+ -
+ actionKey: "heat-stack-list"
+ actionType: "monitor"
+ index: 7
+
+ -
+ actionKey: "cinder-list"
+ actionType: "monitor"
+ index: 8
+
+
+ nodes:
+ node1: node1.LF
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+context:
+ type: Node
+ name: LF
+ file: etc/yardstick/nodes/fuel_virtual/pod.yaml
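
In the GeneralHA scenarios added here, the steps list drives execution order: each actionKey references the key of an attacker or monitor declared earlier under options, and index gives the run order. A minimal sketch of that pattern, with illustrative names only (not taken from any of these files):

    options:
      attackers:
        -
          fault_type: "general-attacker"
          host: node1
          key: "my-attack"          # referenced by actionKey below
      monitors:
        -
          monitor_type: "openstack-cmd"
          key: "my-check"           # referenced by actionKey below
          command_name: "nova image-list"
          monitor_time: 10
          sla:
            max_outage_time: 5
      steps:
        -
          actionKey: "my-attack"    # matches the attacker key above
          actionType: "attacker"
          index: 1
        -
          actionKey: "my-check"     # matches the monitor key above
          actionType: "monitor"
          index: 2
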
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml
new file mode 100644
index 000000000..8e2e0c789
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml
@@ -0,0 +1,90 @@
+---
+# Test case for TC051: OpenStack Controller Node CPU Overload High Availability
+# This test case is written using the new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+ -
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "general-attacker"
+ host: node1
+ key: "stress-cpu"
+ attack_key: "stress-cpu"
+
+ monitors:
+ -
+ monitor_type: "openstack-cmd"
+ key: "nova-image-list"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "neutron-router-list"
+ command_name: "neutron router-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "heat-stack-list"
+ command_name: "heat stack-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "cinder-list"
+ command_name: "cinder list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+
+ steps:
+ -
+ actionKey: "stress-cpu"
+ actionType: "attacker"
+ index: 1
+
+ -
+ actionKey: "nova-image-list"
+ actionType: "monitor"
+ index: 2
+
+ -
+ actionKey: "neutron-router-list"
+ actionType: "monitor"
+ index: 3
+
+ -
+ actionKey: "heat-stack-list"
+ actionType: "monitor"
+ index: 4
+
+ -
+ actionKey: "cinder-list"
+ actionType: "monitor"
+ index: 5
+
+
+ nodes:
+ node1: node1.LF
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+context:
+ type: Node
+ name: LF
+ file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml
new file mode 100644
index 000000000..696ed3ba4
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml
@@ -0,0 +1,61 @@
+---
+# Test case for TC053: OpenStack Controller Load Balance Service High Availability
+# This test case is written using the new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+ -
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "kill-process"
+ host: node1
+ key: "kill-process"
+ process_name: "haproxy"
+
+ monitors:
+ -
+ monitor_type: "process"
+ key: "service-status"
+ process_name: "haproxy"
+ host: node1
+ monitor_time: 20
+ sla:
+ max_recover_time: 30
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "list-images"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ steps:
+ -
+ actionKey: "kill-process"
+ actionType: "attacker"
+ index: 1
+ -
+ actionKey: "service-status"
+ actionType: "monitor"
+ index: 2
+ -
+ actionKey: "list-images"
+ actionType: "monitor"
+ index: 3
+
+ nodes:
+ node1: node1.LF
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+context:
+ type: Node
+ name: LF
+ file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml
new file mode 100644
index 000000000..7d94e3de8
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml
@@ -0,0 +1,113 @@
+---
+# Test case for TC054: OpenStack VIP Master Node abnormal shutdown High Availability
+# This test case is written using the new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+ -
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "bare-metal-down"
+ host: node1
+ key: "bare-metal-down"
+
+ monitors:
+ -
+ monitor_type: "openstack-cmd"
+ key: "list-images"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "vip-mgmt-status"
+ host: node2
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ parameter:
+ ip_address: "192.168.0.2"
+
+ -
+ monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "vip-routerp-status"
+ host: node2
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ parameter:
+ ip_address: "172.16.0.2"
+
+ -
+ monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "vip-router-status"
+ host: node2
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ parameter:
+ ip_address: "192.168.0.1"
+
+ -
+ monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "vip-pub"
+ host: node2
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ parameter:
+ ip_address: "172.16.0.3"
+
+
+ steps:
+ -
+ actionKey: "bare-metal-down"
+ actionType: "attacker"
+ index: 1
+ -
+ actionKey: "list-images"
+ actionType: "monitor"
+ index: 2
+
+ -
+ actionKey: "vip-mgmt-status"
+ actionType: "monitor"
+ index: 3
+
+ -
+ actionKey: "vip-routerp-status"
+ actionType: "monitor"
+ index: 4
+
+ -
+ actionKey: "vip-router-status"
+ actionType: "monitor"
+ index: 5
+
+ -
+ actionKey: "vip-pub"
+ actionType: "monitor"
+ index: 6
+
+ nodes:
+ node1: node1.LF
+ node2: node2.LF
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+context:
+ type: Node
+ name: LF
+ file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc073.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc073.yaml
new file mode 100755
index 000000000..fd95b8c9d
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc073.yaml
@@ -0,0 +1,37 @@
+---
+# Yardstick TC073 config file
+# Measure network latency and throughput using netperf.
+# There are two sample scenarios: a bulk test and a request/response test.
+# In the bulk test, UDP_STREAM and TCP_STREAM can be used;
+# send_msg_size and recv_msg_size are options of the bulk test.
+# In the req/rsp test, TCP_RR, TCP_CRR and UDP_RR can be used;
+# req_rsp_size is an option of the req/rsp test.
+
+schema: "yardstick:task:0.1"
+{% set host = host or "node1.LF" %}
+{% set target = target or "node2.LF" %}
+{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_physical/pod.yaml" %}
+scenarios:
+-
+ type: NetperfNode
+ options:
+ testname: 'UDP_STREAM'
+ send_msg_size: 1024
+ duration: 20
+
+ host: {{host}}
+ target: {{target}}
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+ sla:
+ mean_latency: 100
+ action: monitor
+
+context:
+ type: Node
+ name: LF
+ file: {{pod_info}}
+
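
The header comment above also describes a request/response mode for TC073. A sketch of how such a scenario might look, assuming the NetperfNode scenario accepts req_rsp_size as the comment states; the test name comes from the comment, the remaining values are illustrative:

    -
      type: NetperfNode
      options:
        testname: 'TCP_RR'         # TCP_CRR and UDP_RR are also listed in the comment
        req_rsp_size: '32,1024'    # illustrative request,response sizes
        duration: 20

      host: {{host}}
      target: {{target}}

      runner:
        type: Iteration
        iterations: 1
        interval: 1
      sla:
        mean_latency: 100          # illustrative SLA, mirroring the bulk scenario
        action: monitor
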
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
new file mode 100644
index 000000000..d506ccc1e
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
@@ -0,0 +1,27 @@
+---
+# Test case for TC074: StorPerf benchmark task config file
+# StorPerf is a tool to measure block and object storage performance in an NFVI.
+
+schema: "yardstick:task:0.1"
+{% set public_network = public_network or "ext-net" %}
+{% set StorPerf_ip = StorPerf_ip or "192.168.200.2" %}
+scenarios:
+-
+ type: StorPerf
+ options:
+ agent_count: 1
+ agent_image: "Ubuntu-14.04"
+ public_network: {{public_network}}
+ volume_size: 4
+ block_sizes: "4096"
+ queue_depths: "4"
+ StorPerf_ip: {{StorPerf_ip}}
+ query_interval: 10
+ timeout: 300
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+context:
+ type: Dummy
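
TC073 and TC074 both use the Jinja2 idiom {% set var = var or "default" %}, so host, target, pod_info, public_network and StorPerf_ip fall back to the defaults shown above unless values are supplied when the task is started. Yardstick can take such overrides from a task-arguments file; a sketch of one for TC074, with purely illustrative values for a different deployment:

    # storperf_args.yaml, passed via --task-args-file when starting the task
    public_network: "floating-net"    # illustrative external network name
    StorPerf_ip: "10.20.0.5"          # illustrative address of the StorPerf service
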