Diffstat (limited to 'tests/opnfv/test_cases')
20 files changed, 772 insertions, 37 deletions
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
index 181d7cd73..2e5a4a56f 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
@@ -16,9 +16,15 @@ scenarios:
     monitors:
     - monitor_type: "openstack-cmd"
       command_name: "nova image-list"
+      monitor_time: 10
+      sla:
+        max_outage_time: 5
     - monitor_type: "process"
       process_name: "nova-api"
       host: node1
+      monitor_time: 20
+      sla:
+        max_recover_time: 20
 
   nodes:
     node1: node1.LF
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml
index ccdcaebc8..544118869 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml
@@ -3,7 +3,7 @@
 # Measure IPV6 network latency using ping6
 
 schema: "yardstick:task:0.1"
-
+{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_physical/pod.yaml" %}
 scenarios:
 -
   type: Ping6
@@ -33,6 +33,6 @@ precondition:
 
 context:
   type: Node
   name: IPV6
-  file: /home/opnfv/repos/yardstick/etc/yardstick/nodes/compass_sclab_physical/pod.yaml
+  file: {{pod_info}}
 
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc042.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc042.yaml
new file mode 100644
index 000000000..158f5076e
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc042.yaml
@@ -0,0 +1,50 @@
+---
+# Yardstick TC042 config file
+# Measure network latency using testpmd and pktgen-dpdk
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: PktgenDPDKLatency
+  options:
+    packetsize: 64
+    rate: 100
+
+  host: demeter.yardstick-TC042
+  target: poseidon.yardstick-TC042
+
+  runner:
+    type: Iteration
+    iterations: 1
+    interval: 1
+
+  sla:
+    max_latency: 100
+    action: monitor
+
+context:
+  name: yardstick-TC042
+  image: yardstick-image-pktgen-ready
+  flavor: yardstick-pktgen-dpdk.flavor
+  user: ubuntu
+
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+
+  servers:
+    demeter:
+      floating_ip: true
+      placement: "pgrp1"
+    poseidon:
+      floating_ip: true
+      placement: "pgrp1"
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+    test2:
+      cidr: '10.0.2.0/24'
+    test3:
+      cidr: '10.0.3.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
index 812d53dd8..79ad61e86 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
@@ -21,9 +21,9 @@ scenarios:
     - monitor_type: "process"
       process_name: "neutron-server"
       host: node1
-      monitor_time: 10
+      monitor_time: 20
       sla:
-        max_recover_time: 5
+        max_recover_time: 20
 
   nodes:
     node1: node1.LF
@@ -39,5 +39,5 @@ scenarios:
 context:
   type: Node
   name: LF
-  file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
 
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml
index 867553d21..69cef40a8 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml
@@ -21,9 +21,9 @@ scenarios:
     - monitor_type: "process"
       process_name: "keystone"
       host: node1
-      monitor_time: 10
+      monitor_time: 20
       sla:
-        max_recover_time: 5
+        max_recover_time: 20
 
   nodes:
     node1: node1.LF
@@ -39,4 +39,4 @@ scenarios:
 context:
   type: Node
   name: LF
-  file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc047.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc047.yaml
index 0707dc9e9..f6019f6d5 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc047.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc047.yaml
@@ -21,9 +21,9 @@ scenarios:
     - monitor_type: "process"
       process_name: "glance-api"
       host: node1
-      monitor_time: 10
+      monitor_time: 20
       sla:
-        max_recover_time: 5
+        max_recover_time: 20
 
   nodes:
     node1: node1.LF
@@ -39,4 +39,4 @@ scenarios:
 context:
   type: Node
   name: LF
-  file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc048.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc048.yaml
index 525c206c3..543db9780 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc048.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc048.yaml
@@ -21,9 +21,9 @@ scenarios:
     - monitor_type: "process"
       process_name: "cinder-api"
       host: node1
-      monitor_time: 10
+      monitor_time: 20
       sla:
-        max_recover_time: 5
+        max_recover_time: 20
 
   nodes:
     node1: node1.LF
@@ -39,5 +39,5 @@ scenarios:
 context:
   type: Node
   name: LF
-  file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
 
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc049.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc049.yaml
index da93a467f..759867d46 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc049.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc049.yaml
@@ -21,9 +21,9 @@ scenarios:
     - monitor_type: "process"
       process_name: "swift-proxy"
       host: node1
-      monitor_time: 10
+      monitor_time: 20
       sla:
-        max_recover_time: 5
+        max_recover_time: 20
 
   nodes:
     node1: node1.LF
@@ -39,4 +39,4 @@ scenarios:
 context:
   type: Node
   name: LF
-  file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml
new file mode 100644
index 000000000..0b21f8861
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml
@@ -0,0 +1,139 @@
+---
+# Test case for TC050 :OpenStack Controller Node Network High Availability
+# This test case is written by new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+  -
+    type: "GeneralHA"
+    options:
+      attackers:
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "close-br-public"
+          attack_key: "close-interface"
+          action_parameter:
+            interface: "br-ex"
+          rollback_parameter:
+            interface: "br-ex"
+
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "close-br-mgmt"
+          attack_key: "close-interface"
+          action_parameter:
+            interface: "br-mgmt"
+          rollback_parameter:
+            interface: "br-mgmt"
+
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "close-br-storage"
+          attack_key: "close-interface"
+          action_parameter:
+            interface: "br-storage"
+          rollback_parameter:
+            interface: "br-storage"
+
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "close-br-private"
+          attack_key: "close-interface"
+          action_parameter:
+            interface: "br-mesh"
+          rollback_parameter:
+            interface: "br-mesh"
+
+      monitors:
+        -
+          monitor_type: "openstack-cmd"
+          key: "nova-image-list"
+          command_name: "nova image-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "neutron-router-list"
+          command_name: "neutron router-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "heat-stack-list"
+          command_name: "heat stack-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "cinder-list"
+          command_name: "cinder list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+
+      steps:
+        -
+          actionKey: "close-br-public"
+          actionType: "attacker"
+          index: 1
+
+        -
+          actionKey: "close-br-mgmt"
+          actionType: "attacker"
+          index: 2
+
+        -
+          actionKey: "close-br-storage"
+          actionType: "attacker"
+          index: 3
+
+        -
+          actionKey: "close-br-private"
+          actionType: "attacker"
+          index: 4
+
+        -
+          actionKey: "nova-image-list"
+          actionType: "monitor"
+          index: 5
+
+        -
+          actionKey: "neutron-router-list"
+          actionType: "monitor"
+          index: 6
+
+        -
+          actionKey: "heat-stack-list"
+          actionType: "monitor"
+          index: 7
+
+        -
+          actionKey: "cinder-list"
+          actionType: "monitor"
+          index: 8
+
+
+    nodes:
+      node1: node1.LF
+    runner:
+      type: Duration
+      duration: 1
+    sla:
+      outage_time: 5
+      action: monitor
+
+context:
+  type: Node
+  name: LF
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml
new file mode 100644
index 000000000..8e2e0c789
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml
@@ -0,0 +1,90 @@
+---
+# Test case for TC051 :OpenStack Controller Node CPU Overload High Availability
+# This test case is written by new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+  -
+    type: "GeneralHA"
+    options:
+      attackers:
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "stress-cpu"
+          attack_key: "stress-cpu"
+
+      monitors:
+        -
+          monitor_type: "openstack-cmd"
+          key: "nova-image-list"
+          command_name: "nova image-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "neutron-router-list"
+          command_name: "neutron router-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "heat-stack-list"
+          command_name: "heat stack-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "cinder-list"
+          command_name: "cinder list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+
+      steps:
+        -
+          actionKey: "stress-cpu"
+          actionType: "attacker"
+          index: 1
+
+        -
+          actionKey: "nova-image-list"
+          actionType: "monitor"
+          index: 2
+
+        -
+          actionKey: "neutron-router-list"
+          actionType: "monitor"
+          index: 3
+
+        -
+          actionKey: "heat-stack-list"
+          actionType: "monitor"
+          index: 4
+
+        -
+          actionKey: "cinder-list"
+          actionType: "monitor"
+          index: 5
+
+
+    nodes:
+      node1: node1.LF
+    runner:
+      type: Duration
+      duration: 1
+    sla:
+      outage_time: 5
+      action: monitor
+
+context:
+  type: Node
+  name: LF
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml
new file mode 100644
index 000000000..714306881
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml
@@ -0,0 +1,79 @@
+---
+# Test case for TC052 :OpenStack Controller Node Disk I/O Block High Availability
+# This test case is written by new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+  -
+    type: "GeneralHA"
+    options:
+      attackers:
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "block-io"
+          attack_key: "block-io"
+
+      monitors:
+        -
+          monitor_type: "openstack-cmd"
+          key: "nova-flavor-list"
+          command_name: "nova flavor-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+      operations:
+        -
+          operation_type: "general-operation"
+          key: "create-flavor"
+          operation_key: "nova-create-flavor"
+          host: node1
+          action_parameter:
+            flavorconfig: "test-001 test-001 100 1 1"
+          rollback_parameter:
+            flavorid: "test-001"
+
+      resultCheckers:
+        -
+          checker_type: "general-result-checker"
+          key: "check-flavor"
+          host: node1
+          checker_key: "nova-flavor-checker"
+          expectedValue: "test-001"
+          condition: "in"
+
+      steps:
+        -
+          actionKey: "block-io"
+          actionType: "attacker"
+          index: 1
+
+        -
+          actionKey: "nova-flavor-list"
+          actionType: "monitor"
+          index: 2
+
+        -
+          actionKey: "create-flavor"
+          actionType: "operation"
+          index: 3
+
+        -
+          actionKey: "check-flavor"
+          actionType: "resultchecker"
+          index: 4
+
+    nodes:
+      node1: node1.LF
+    runner:
+      type: Duration
+      duration: 1
+    sla:
+      outage_time: 5
+      action: monitor
+
+context:
+  type: Node
+  name: LF
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml
new file mode 100644
index 000000000..696ed3ba4
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml
@@ -0,0 +1,61 @@
+---
+# Test case for TC053 :Openstack Controller Load Balance Service High Availability
+# This test case is written by new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+  -
+    type: "GeneralHA"
+    options:
+      attackers:
+        -
+          fault_type: "kill-process"
+          host: node1
+          key: "kill-process"
+          process_name: "haproxy"
+
+      monitors:
+        -
+          monitor_type: "process"
+          key: "service-status"
+          process_name: "haproxy"
+          host: node1
+          monitor_time: 20
+          sla:
+            max_recover_time: 30
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "list-images"
+          command_name: "nova image-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+      steps:
+        -
+          actionKey: "kill-process"
+          actionType: "attacker"
+          index: 1
+        -
+          actionKey: "service-status"
+          actionType: "monitor"
+          index: 2
+        -
+          actionKey: "list-images"
+          actionType: "monitor"
+          index: 3
+
+    nodes:
+      node1: node1.LF
+    runner:
+      type: Duration
+      duration: 1
+    sla:
+      outage_time: 5
+      action: monitor
+
+context:
+  type: Node
+  name: LF
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml
new file mode 100644
index 000000000..7d94e3de8
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml
@@ -0,0 +1,113 @@
+---
+# Test case for TC054 :OpenStack VIP Master Node abnormally shutdown High Availability
+# This test case is written by new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+  -
+    type: "GeneralHA"
+    options:
+      attackers:
+        -
+          fault_type: "bare-metal-down"
+          host: node1
+          key: "bare-metal-down"
+
+      monitors:
+        -
+          monitor_type: "openstack-cmd"
+          key: "list-images"
+          command_name: "nova image-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "general-monitor"
+          monitor_key: "ip-status"
+          key: "vip-mgmt-status"
+          host: node2
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+          parameter:
+            ip_address: "192.168.0.2"
+
+        -
+          monitor_type: "general-monitor"
+          monitor_key: "ip-status"
+          key: "vip-routerp-status"
+          host: node2
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+          parameter:
+            ip_address: "172.16.0.2"
+
+        -
+          monitor_type: "general-monitor"
+          monitor_key: "ip-status"
+          key: "vip-router-status"
+          host: node2
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+          parameter:
+            ip_address: "192.168.0.1"
+
+        -
+          monitor_type: "general-monitor"
+          monitor_key: "ip-status"
+          key: "vip-pub"
+          host: node2
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+          parameter:
+            ip_address: "172.16.0.3"
+
+
+      steps:
+        -
+          actionKey: "bare-metal-down"
+          actionType: "attacker"
+          index: 1
+        -
+          actionKey: "list-images"
+          actionType: "monitor"
+          index: 2
+
+        -
+          actionKey: "vip-mgmt-status"
+          actionType: "monitor"
+          index: 3
+
+        -
+          actionKey: "vip-routerp-status"
+          actionType: "monitor"
+          index: 4
+
+        -
+          actionKey: "vip-router-status"
+          actionType: "monitor"
+          index: 5
+
+        -
+          actionKey: "vip-pub"
+          actionType: "monitor"
+          index: 6
+
+    nodes:
+      node1: node1.LF
+      node2: node2.LF
+    runner:
+      type: Duration
+      duration: 1
+    sla:
+      outage_time: 5
+      action: monitor
+
+context:
+  type: Node
+  name: LF
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc055.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc055.yaml
new file mode 100644
index 000000000..b43e56665
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc055.yaml
@@ -0,0 +1,23 @@
+---
+# Yardstick TC055 config file
+# Collect hardware specification from /proc/cpuinfo
+
+schema: "yardstick:task:0.1"
+{% set host = host or "node5.yardstick-TC055" %}
+{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_virtual/pod.yaml" %}
+scenarios:
+-
+  type: ComputeCapacity
+  options:
+
+  nodes:
+    host: {{host}}
+
+  runner:
+    type: Iteration
+    iterations: 1
+
+context:
+  type: Node
+  name: yardstick-TC055
+  file: {{pod_info}}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc063.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc063.yaml
new file mode 100644
index 000000000..9da889847
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc063.yaml
@@ -0,0 +1,23 @@
+# Yardstick TC063 config file
+# Measure disk size, block size and disk utilization using fdisk and iostat
+
+schema: "yardstick:task:0.1"
+{% set host = host or "node5.yardstick-TC063" %}
+{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_virtual/pod.yaml" %}
+
+scenarios:
+-
+  type: StorageCapacity
+  options:
+    test_type: "disk_size"
+
+  host: {{host}}
+
+  runner:
+    type: Iteration
+    iterations: 1
+
+context:
+  type: Node
+  name: yardstick-TC063
+  file: {{pod_info}}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc070.yaml
index d7406832d..28b28b9ab 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc044.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc070.yaml
@@ -1,13 +1,13 @@
 ---
-# Yardstick TC044 config file
-# Measure memory usage statistics, network throughput, latency and packet loss.
+# Yardstick TC070 config file
+# Measure network throughput and packet loss using pktgen.
 # Different amounts of flows are tested with, from 2 up to 1001000.
 # All tests are run 2 times each. First 2 times with the least
 # amount of ports, then 2 times with the next amount of ports,
 # and so on until all packet sizes have been run with.
 #
 # During the measurements memory usage statistics and network latency are
-# recorded/measured using sar and ping, respectively.
+# recorded/measured using free and ping, respectively.
schema: "yardstick:task:0.1" @@ -18,18 +18,18 @@ scenarios: options: interval: 1 - count: 1 + count: 10 - host: demeter.yardstick-TC044 + host: demeter.yardstick-TC070 - type: MEMORYload run_in_background: true options: interval: 1 - count: 1 + count: 10 - host: poseidon.yardstick-TC044 + host: poseidon.yardstick-TC070 - type: Ping run_in_background: true @@ -37,8 +37,8 @@ scenarios: options: packetsize: 100 - host: demeter.yardstick-TC044 - target: poseidon.yardstick-TC044 + host: demeter.yardstick-TC070 + target: poseidon.yardstick-TC070 sla: max_rtt: 10 @@ -51,8 +51,8 @@ scenarios: number_of_ports: {{num_ports}} duration: 20 - host: demeter.yardstick-TC044 - target: poseidon.yardstick-TC044 + host: demeter.yardstick-TC070 + target: poseidon.yardstick-TC070 runner: type: Iteration @@ -65,7 +65,7 @@ scenarios: {% endfor %} context: - name: yardstick-TC044 + name: yardstick-TC070 image: yardstick-trusty-server flavor: yardstick-flavor user: ubuntu diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc071.yaml index 2d10e4073..644010916 100644 --- a/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc071.yaml @@ -1,5 +1,5 @@ --- -# Yardstick TC004 config file +# Yardstick TC071 config file # Measure cache hit/miss ratio and usage, network throughput and latency. # Different amounts of flows are tested with, from 2 up to 1001000. # All tests are run 2 times each. First 2 times with the least @@ -19,7 +19,7 @@ scenarios: options: interval: 1 - host: demeter.yardstick + host: demeter.yardstick-TC071 - type: CACHEstat run_in_background: true @@ -27,7 +27,7 @@ scenarios: options: interval: 1 - host: poseidon.yardstick + host: poseidon.yardstick-TC071 - type: Ping run_in_background: true @@ -35,8 +35,8 @@ scenarios: options: packetsize: 100 - host: demeter.yardstick - target: poseidon.yardstick + host: demeter.yardstick-TC071 + target: poseidon.yardstick-TC071 sla: max_rtt: 10 @@ -49,8 +49,8 @@ scenarios: number_of_ports: {{num_ports}} duration: 20 - host: demeter.yardstick - target: poseidon.yardstick + host: demeter.yardstick-TC071 + target: poseidon.yardstick-TC071 runner: type: Iteration @@ -63,7 +63,7 @@ scenarios: {% endfor %} context: - name: yardstick + name: yardstick-TC071 image: yardstick-trusty-server flavor: yardstick-flavor user: ubuntu diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc072.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc072.yaml new file mode 100644 index 000000000..f3e6d4c40 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc072.yaml @@ -0,0 +1,87 @@ +--- +# Yardstick TC072 config file +# Measure network throughput and packet loss using pktgen. +# Different amounts of flows are tested with, from 2 up to 1001000. +# All tests are run 2 times each. First 2 times with the least +# amount of ports, then 2 times with the next amount of ports, +# and so on until all packet sizes have been run with. +# +# During the measurements network usage statistics and network latency are +# recorded/measured using sar and ping, respectively. 
+ +schema: "yardstick:task:0.1" + +scenarios: +- + type: NetUtilization + run_in_background: true + + options: + interval: 1 + count: 10 + + host: demeter.yardstick-TC072 +- + type: NetUtilization + run_in_background: true + + options: + interval: 1 + count: 10 + + host: poseidon.yardstick-TC072 +- + type: Ping + run_in_background: true + + options: + packetsize: 100 + + host: demeter.yardstick-TC072 + target: poseidon.yardstick-TC072 + + sla: + max_rtt: 10 + action: monitor +{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %} +- + type: Pktgen + options: + packetsize: 64 + number_of_ports: {{num_ports}} + duration: 20 + + host: demeter.yardstick-TC072 + target: poseidon.yardstick-TC072 + + runner: + type: Iteration + iterations: 2 + interval: 1 + + sla: + max_ppm: 1000 + action: monitor +{% endfor %} + +context: + name: yardstick-TC072 + image: yardstick-trusty-server + flavor: yardstick-flavor + user: ubuntu + + placement_groups: + pgrp1: + policy: "availability" + + servers: + demeter: + floating_ip: true + placement: "pgrp1" + poseidon: + floating_ip: true + placement: "pgrp1" + + networks: + test: + cidr: '10.0.1.0/24' diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc073.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc073.yaml new file mode 100755 index 000000000..fd95b8c9d --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc073.yaml @@ -0,0 +1,37 @@ +--- +# Yardstick TC073 config file +# measure network latency and throughput using netperf +# There are two sample scenarios: bulk test and request/response test +# In bulk test, UDP_STREAM and TCP_STREAM can be used +# send_msg_size and recv_msg_size are options of bulk test +# In req/rsp test, TCP_RR TCP_CRR UDP_RR can be used +# req_rsp_size is option of req/rsp test + +schema: "yardstick:task:0.1" +{% set host = host or "node1.LF" %} +{% set target = target or "node2.LF" %} +{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_physical/pod.yaml" %} +scenarios: +- + type: NetperfNode + options: + testname: 'UDP_STREAM' + send_msg_size: 1024 + duration: 20 + + host: {{host}} + target: {{target}} + + runner: + type: Iteration + iterations: 1 + interval: 1 + sla: + mean_latency: 100 + action: monitor + +context: + type: Node + name: LF + file: {{pod_info}} + diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml new file mode 100644 index 000000000..d506ccc1e --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml @@ -0,0 +1,27 @@ +--- +# Test case for TC074 StorPerf benchmark task config file +# StorPerf is a tool to measure block and object storage performance in an NFVI + +schema: "yardstick:task:0.1" +{% set public_network = public_network or "ext-net" %} +{% set StorPerf_ip = StorPerf_ip or "192.168.200.2" %} +scenarios: +- + type: StorPerf + options: + agent_count: 1 + agent_image: "Ubuntu-14.04" + public_network: {{public_network}} + volume_size: 4 + block_sizes: "4096" + queue_depths: "4" + StorPerf_ip: {{StorPerf_ip}} + query_interval: 10 + timeout: 300 + + runner: + type: Iteration + iterations: 1 + +context: + type: Dummy |