Diffstat (limited to 'tests')
48 files changed, 2004 insertions, 283 deletions
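Several of the task files added below (TC005, TC008, TC009, TC037, TC038) are Jinja2 templates rather than plain YAML: each {% for %} loop expands into one concrete scenario block per parameter value before the file is parsed. A minimal sketch of that expansion step, assuming the jinja2 and PyYAML packages and a trimmed-down template (Yardstick's actual task parser does more, e.g. schema checks):

import yaml
from jinja2 import Template

# A trimmed-down stand-in for the templated task files in this change:
# each loop iteration emits one concrete scenario block.
TASK = """\
---
scenarios:
{% for rw in ['read', 'write', 'randread'] %}
-
  type: Fio
  options:
    rw: {{ rw }}
    bs: 4k
{% endfor %}
"""

rendered = Template(TASK).render()
task = yaml.safe_load(rendered)
assert len(task["scenarios"]) == 3  # one Fio scenario per rw mode

The nested loops in TC008 below, for instance, expand 7 packet sizes x 6 port counts into 42 Pktgen scenarios from a few template lines.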
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml new file mode 100644 index 000000000..f89a3099e --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml @@ -0,0 +1,48 @@ +--- +# Yardstick TC005 config file +# Measure Storage IOPS, throughput and latency using fio + +schema: "yardstick:task:0.1" +scenarios: +{% for rw in ['read', 'write', 'randwrite', 'randread', 'rw'] %} + {% for bs in ['4k', '64k', '1024k'] %} +- + type: Fio + options: + filename: /home/ec2-user/data.raw + bs: {{bs}} + rw: {{rw}} + ramp_time: 10 + duration: 20 + + host: fio.yardstick-TC005 + + runner: + type: Iteration + iterations: 1 + interval: 1 + + sla: + read_bw: 400 + read_iops: 100 + read_lat: 20000 + write_bw: 400 + write_iops: 100 + write_lat: 20000 + action: monitor + {% endfor %} +{% endfor %} + +context: + name: yardstick-TC005 + image: yardstick-trusty-server + flavor: m1.small + user: ec2-user + + servers: + fio: + floating_ip: true + + networks: + test: + cidr: '10.0.1.0/24' diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml new file mode 100644 index 000000000..3d4091293 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml @@ -0,0 +1,26 @@ +--- +schema: "yardstick:task:0.1" + +scenarios: +- + type: vtc_throughput + + options: + packet_size: 1280 + vlan_sender: 1007 + vlan_receiver: 1006 + default_net_name: monitoring + default_subnet_name: monitoring_subnet + vlan_net_1_name: inbound_traffic_network + vlan_subnet_1_name: inbound_traffic_subnet + vlan_net_2_name: inbound_traffic_network + vlan_subnet_2_name: inbound_traffic_subnet + vnic_type: direct # [normal (OvS), direct (SR-IOV)] + vtc_flavor: m1.large + + runner: + type: Iteration + iterations: 1 + +context: + type: Dummy diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml new file mode 100644 index 000000000..30d59f797 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml @@ -0,0 +1,32 @@ +--- +# Sample benchmark task config file +# vTC + +schema: "yardstick:task:0.1" + +scenarios: +- + type: vtc_throughput_noisy + + options: + packet_size: 1280 + vlan_sender: 1007 + vlan_receiver: 1006 + default_net_name: monitoring + default_subnet_name: monitoring_subnet + vlan_net_1_name: inbound_traffic_network + vlan_subnet_1_name: inbound_traffic_subnet + vlan_net_2_name: inbound_traffic_network + vlan_subnet_2_name: inbound_traffic_subnet + vnic_type: direct # [normal (OvS), direct (SR-IOV)] + vtc_flavor: m1.large + num_of_neighbours: 2 + amount_of_ram: 1G + number_of_cores: 2 + + runner: + type: Iteration + iterations: 1 + +context: + type: Dummy diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml new file mode 100644 index 000000000..385e530ce --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml @@ -0,0 +1,58 @@ +--- +# Yardstick TC008 config file +# Measure network throughput and packet loss using Pktgen. +# Different amount of flows, from 2 up to 1001000, in combination +# with different packet sizes are run in each test. +# Each combination of packet size and flow amount is run 10 times. +# First 10 times with the smallest packet size, starting with the +# least amount of ports/flows, then next amount of ports with same +# packet size, and so on. 
The test sequence continues with the next +# packet size, with same ports/flows sequence as before. + +schema: "yardstick:task:0.1" + +scenarios: +{% for pkt_size in [64, 128, 256, 512, 1024, 1280, 1518] %} + {% for num_ports in [1, 10, 50, 100, 500, 1000] %} +- + type: Pktgen + options: + packetsize: {{pkt_size}} + number_of_ports: {{num_ports}} + duration: 20 + + host: demeter.yardstick-TC008 + target: poseidon.yardstick-TC008 + + runner: + type: Iteration + iterations: 10 + interval: 1 + + sla: + max_ppm: 1000 + action: monitor + {% endfor %} +{% endfor %} + +context: + name: yardstick-TC008 + image: yardstick-trusty-server + flavor: yardstick-flavor + user: ec2-user + + placement_groups: + pgrp1: + policy: "availability" + + servers: + demeter: + floating_ip: true + placement: "pgrp1" + poseidon: + floating_ip: true + placement: "pgrp1" + + networks: + test: + cidr: '10.0.1.0/24' diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml new file mode 100644 index 000000000..4d46c0336 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml @@ -0,0 +1,53 @@ +--- +# Yardstick TC009 config file +# Measure network throughput and packet loss using pktgen. +# Different amounts of flows are tested, from 2 up to 1001000. +# All tests are run 10 times each. First 10 times with the least +# amount of ports, then 10 times with the next amount of ports, +# and so on until all port amounts have been tested. + +schema: "yardstick:task:0.1" + +scenarios: +{% for num_ports in [1, 10, 50, 100, 500, 1000] %} +- + type: Pktgen + options: + packetsize: 64 + number_of_ports: {{num_ports}} + duration: 20 + + host: demeter.yardstick-TC009 + target: poseidon.yardstick-TC009 + + runner: + type: Iteration + iterations: 10 + interval: 1 + + sla: + max_ppm: 1000 + action: monitor +{% endfor %} + +context: + name: yardstick-TC009 + image: yardstick-trusty-server + flavor: yardstick-flavor + user: ec2-user + + placement_groups: + pgrp1: + policy: "availability" + + servers: + demeter: + floating_ip: true + placement: "pgrp1" + poseidon: + floating_ip: true + placement: "pgrp1" + + networks: + test: + cidr: '10.0.1.0/24' diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml new file mode 100644 index 000000000..42327f05f --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml @@ -0,0 +1,38 @@ +--- +# Yardstick TC010 config file +# Measure memory read latency using lmbench + +schema: "yardstick:task:0.1" + +scenarios: +- + type: Lmbench + options: + test_type: "latency" + stride: 128 + stop_size: 64.0 + + host: demeter.yardstick-TC010 + + runner: + type: Iteration + iterations: 10 + interval: 1 + + sla: + max_latency: 30 + action: monitor + +context: + name: yardstick-TC010 + image: yardstick-trusty-server + flavor: m1.small + user: ec2-user + + servers: + demeter: + floating_ip: true + + networks: + test: + cidr: '10.0.1.0/24' diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml new file mode 100644 index 000000000..f1b995371 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml @@ -0,0 +1,32 @@ +--- +# Yardstick TC014 config file +# Measure processing speed using UnixBench + +schema: "yardstick:task:0.1" + +scenarios: +- + type: UnixBench + options: + run_mode: 'verbose' + test_type: 'dhry2reg' + host: Chang'e.yardstick-TC014 + + runner: + type: Iteration + iterations: 1 + interval: 1 +
+context: + name: yardstick-TC014 + image: yardstick-trusty-server + flavor: yardstick-flavor + user: ec2-user + + servers: + Chang'e: + floating_ip: true + + networks: + test: + cidr: '10.0.1.0/24'
\ No newline at end of file diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml new file mode 100644 index 000000000..181d7cd73 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml @@ -0,0 +1,38 @@ +--- +# Sample test case for the HA of controller node Openstack service + +schema: "yardstick:task:0.1" + +scenarios: +- + type: ServiceHA + options: + attackers: + - fault_type: "kill-process" + process_name: "nova-api" + host: node1 + + wait_time: 10 + monitors: + - monitor_type: "openstack-cmd" + command_name: "nova image-list" + - monitor_type: "process" + process_name: "nova-api" + host: node1 + + nodes: + node1: node1.LF + + runner: + type: Iteration + iterations: 1 + + sla: + outage_time: 5 + action: monitor + + +context: + type: Node + name: LF + file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml new file mode 100644 index 000000000..8d9edfe7b --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml @@ -0,0 +1,31 @@ +--- +# Sample benchmark task config file +# vTC + +schema: "yardstick:task:0.1" + +scenarios: +- + type: vtc_instantiation_validation + + options: + vlan_sender: 1007 + vlan_receiver: 1006 + default_net_name: monitoring + default_subnet_name: monitoring_subnet + vlan_net_1_name: inbound_traffic_network + vlan_subnet_1_name: inbound_traffic_subnet + vlan_net_2_name: inbound_traffic_network + vlan_subnet_2_name: inbound_traffic_subnet + vnic_type: direct # [normal (OvS), direct (SR-IOV)] + vtc_flavor: m1.large + + runner: + type: Iteration + iterations: 1 + +# dummy context, will not be used by vTC +context: + type: Node + name: LF + file: /etc/yardstick/nodes/fuel_virtual/pod.yaml diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml new file mode 100644 index 000000000..c62ce2a32 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml @@ -0,0 +1,28 @@ +--- +schema: "yardstick:task:0.1" + +scenarios: +- + type: vtc_instantiation_validation_noisy + + options: + vlan_sender: 1007 + vlan_receiver: 1006 + default_net_name: monitoring + default_subnet_name: monitoring_subnet + vlan_net_1_name: inbound_traffic_network + vlan_subnet_1_name: inbound_traffic_subnet + vlan_net_2_name: inbound_traffic_network + vlan_subnet_2_name: inbound_traffic_subnet + vnic_type: direct # [normal (OvS), direct (SR-IOV)] + vtc_flavor: m1.large + num_of_neighbours: 2 + amount_of_ram: 1G + number_of_cores: 2 + + runner: + type: Iteration + iterations: 1 + +context: + type: Dummy diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml new file mode 100644 index 000000000..9b5e86509 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml @@ -0,0 +1,27 @@ +--- +# Yardstick TC027 config file +# Measure IPV6 network latency using ping6 + +schema: "yardstick:task:0.1" + +scenarios: +- + type: Ping6 + host: node1.IPV6 + + runner: + type: Iteration + iterations: 1 + interval: 1 + run_step: 'setup,run,teardown' + sla: + max_rtt: 10 + action: monitor + + +context: + type: Node + name: IPV6 + file: /root/yardstick/etc/yardstick/nodes/compass_sclab_physical/pod.yaml + + diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml new file mode 100644 index 000000000..a73dfee0a --- /dev/null +++ 
b/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml @@ -0,0 +1,85 @@ +--- +# Yardstick TC037 config file +# Measure network throughput and packet loss using pktgen. +# Different amounts of flows are tested, from 2 up to 1001000. +# All tests are run 2 times each. First 2 times with the least +# amount of ports, then 2 times with the next amount of ports, +# and so on until all port amounts have been tested. +# +# During the measurements system load and network latency are +# recorded/measured using mpstat and ping, respectively. + +schema: "yardstick:task:0.1" + +scenarios: +- + type: CPUload + run_in_background: true + + options: + interval: 1 + + host: demeter.yardstick-TC037 +- + type: CPUload + run_in_background: true + + options: + interval: 1 + + host: poseidon.yardstick-TC037 +- + type: Ping + run_in_background: true + + options: + packetsize: 100 + + host: demeter.yardstick-TC037 + target: poseidon.yardstick-TC037 + + sla: + max_rtt: 10 + action: monitor +{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %} +- + type: Pktgen + options: + packetsize: 64 + number_of_ports: {{num_ports}} + duration: 20 + + host: demeter.yardstick-TC037 + target: poseidon.yardstick-TC037 + + runner: + type: Iteration + iterations: 2 + interval: 1 + + sla: + max_ppm: 1000 + action: monitor +{% endfor %} + +context: + name: yardstick-TC037 + image: yardstick-trusty-server + flavor: yardstick-flavor + user: ec2-user + + placement_groups: + pgrp1: + policy: "availability" + + servers: + demeter: + floating_ip: true + placement: "pgrp1" + poseidon: + floating_ip: true + placement: "pgrp1" + + networks: + test: + cidr: '10.0.1.0/24' diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml new file mode 100644 index 000000000..59608e312 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml @@ -0,0 +1,85 @@ +--- +# Yardstick TC038 config file +# Measure network throughput and packet loss using pktgen. +# Different amounts of flows are tested, from 2 up to 1001000. +# All tests are run 10 times each. First 10 times with the least +# amount of ports, then 10 times with the next amount of ports, +# and so on until all port amounts have been tested. +# +# During the measurements system load and network latency are +# recorded/measured using mpstat and ping, respectively.
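The Pktgen scenarios above and below gate on an SLA of max_ppm: 1000, i.e. at most 1000 lost packets per million sent. A short sketch of how such a check reads, assuming sent/received packet counts as inputs (an illustration, not code taken from this diff):

# Sketch of a max_ppm SLA check; packets_sent/packets_received are
# assumed inputs here, not fields defined in this change.
def check_packet_loss_sla(packets_sent, packets_received, max_ppm=1000):
    ppm = (packets_sent - packets_received) * 1000000 / packets_sent
    assert ppm <= max_ppm, "packet loss %d ppm > SLA %d ppm" % (ppm, max_ppm)

check_packet_loss_sla(1000000, 999500)  # 500 ppm lost: within the SLA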
+ +schema: "yardstick:task:0.1" + +scenarios: +- + type: CPUload + run_in_background: true + + options: + interval: 1 + + host: demeter.yardstick-TC038 +- + type: CPUload + run_in_background: true + + options: + interval: 1 + + host: poseidon.yardstick-TC038 +- + type: Ping + run_in_background: true + + options: + packetsize: 100 + + host: demeter.yardstick-TC038 + target: poseidon.yardstick-TC038 + + sla: + max_rtt: 10 + action: monitor +{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %} +- + type: Pktgen + options: + packetsize: 64 + number_of_ports: {{num_ports}} + duration: 20 + + host: demeter.yardstick-TC038 + target: poseidon.yardstick-TC038 + + runner: + type: Iteration + iterations: 10 + interval: 1 + + sla: + max_ppm: 1000 + action: monitor +{% endfor %} + +context: + name: yardstick-TC038 + image: yardstick-trusty-server + flavor: yardstick-flavor + user: ec2-user + + placement_groups: + pgrp1: + policy: "availability" + + servers: + demeter: + floating_ip: true + placement: "pgrp1" + poseidon: + floating_ip: true + placement: "pgrp1" + + networks: + test: + cidr: '10.0.1.0/24' diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml new file mode 100644 index 000000000..0a6dee656 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml @@ -0,0 +1,22 @@ +--- +# Yardstick TC040 config file +# Running Parser Yang-to-Tosca module as a tool, validating output against expected outcome + +schema: "yardstick:task:0.1" + + +scenarios: +- + type: Parser + options: + yangfile: /home/opnfv/repos/yardstick/samples/yang.yaml + toscafile: /home/opnfv/repos/yardstick//samples/tosca.yaml + + runner: + type: Iteration + iterations: 1 + interval: 1 + +context: + type: Dummy + diff --git a/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml b/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml index 8279d2378..04bac491f 100644 --- a/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml @@ -11,4 +11,8 @@ test_cases: - file_name: opnfv_yardstick_tc002.yaml - + file_name: opnfv_yardstick_tc005.yaml +- file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml b/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml new file mode 100644 index 000000000..c3e68150d --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml @@ -0,0 +1,18 @@ +--- +# ERICSSON POD2 daily task suite + +schema: "yardstick:suite:0.1" + +name: "opnfv_ericsson_daily" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc001.yaml +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml b/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml index e883f560f..ee13e6d9d 100644 --- a/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml @@ -11,4 +11,8 @@ test_cases: - file_name: opnfv_yardstick_tc002.yaml - + file_name: opnfv_yardstick_tc005.yaml +- file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml new file mode 100644 
index 000000000..1bb241ed8 --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml @@ -0,0 +1,18 @@ +--- +# INTEL POD2 daily task suite + +schema: "yardstick:suite:0.1" + +name: "opnfv_intel_daily" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc001.yaml +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml new file mode 100644 index 000000000..2ffacb1d0 --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml @@ -0,0 +1,18 @@ +--- +# INTEL POD5 daily task suite + +schema: "yardstick:suite:0.1" + +name: "opnfv_intel_daily" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc001.yaml +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml new file mode 100644 index 000000000..792bba2b0 --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml @@ -0,0 +1,18 @@ +--- +# INTEL POD6 daily task suite + +schema: "yardstick:suite:0.1" + +name: "opnfv_intel_daily" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc001.yaml +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml new file mode 100644 index 000000000..f10a854d2 --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml @@ -0,0 +1,18 @@ +--- +# INTEL POD8 daily task suite + +schema: "yardstick:suite:0.1" + +name: "opnfv_intel_daily" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc001.yaml +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml b/tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml new file mode 100644 index 000000000..baade6987 --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml @@ -0,0 +1,18 @@ +--- +# LF POD 1 daily task suite + +schema: "yardstick:suite:0.1" + +name: "opnfv_lf_daily" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc001.yaml +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml b/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml index 4dece13f2..57c95cf69 100644 --- a/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml @@ -11,4 +11,8 @@ test_cases: - file_name: opnfv_yardstick_tc002.yaml - + file_name: opnfv_yardstick_tc005.yaml +- file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/opnfv/test_suites/opnfv_vTC_daily.yaml 
b/tests/opnfv/test_suites/opnfv_vTC_daily.yaml new file mode 100644 index 000000000..37738b423 --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_vTC_daily.yaml @@ -0,0 +1,16 @@ +--- +# ERICSSON POD1 VTC daily task suite + +schema: "yardstick:suite:0.1" + +name: "opnfv_vTC_daily" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc006.yaml +- + file_name: opnfv_yardstick_tc007.yaml +- + file_name: opnfv_yardstick_tc020.yaml +- + file_name: opnfv_yardstick_tc021.yaml diff --git a/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml b/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml new file mode 100644 index 000000000..216648d6f --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml @@ -0,0 +1,16 @@ +--- +# ERICSSON POD1 VTC weekly task suite + +schema: "yardstick:suite:0.1" + +name: "opnfv_vTC_weekly" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc006.yaml +- + file_name: opnfv_yardstick_tc007.yaml +- + file_name: opnfv_yardstick_tc020.yaml +- + file_name: opnfv_yardstick_tc021.yaml diff --git a/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml b/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml new file mode 100644 index 000000000..8016b46b2 --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml @@ -0,0 +1,18 @@ +--- +# ZTE POD 1 daily task suite + +schema: "yardstick:suite:0.1" + +name: "opnfv_zte_daily" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc001.yaml +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc037.yaml diff --git a/tests/unit/benchmark/contexts/test_dummy.py b/tests/unit/benchmark/contexts/test_dummy.py new file mode 100644 index 000000000..5214e6630 --- /dev/null +++ b/tests/unit/benchmark/contexts/test_dummy.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.contexts.dummy + +import unittest + +from yardstick.benchmark.contexts import dummy + + +class DummyContextTestCase(unittest.TestCase): + + def setUp(self): + self.test_context = dummy.DummyContext() + + def test__get_server(self): + self.test_context.init(None) + self.test_context.deploy() + + result = self.test_context._get_server(None) + self.assertEqual(result, None) + + self.test_context.undeploy() diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py new file mode 100644 index 000000000..340f94cb0 --- /dev/null +++ b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal + +import mock +import unittest + +from yardstick.benchmark.scenarios.availability.attacker import baseattacker +from yardstick.benchmark.scenarios.availability.attacker import attacker_baremetal + +@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess') +class ExecuteShellTestCase(unittest.TestCase): + + def test__fun_execute_shell_command_successful(self, mock_subprocess): + cmd = "env" + mock_subprocess.check_output.return_value = (0, 'unittest') + exitcode, output = attacker_baremetal._execute_shell_command(cmd) + self.assertEqual(exitcode, 0) + + def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess): + cmd = "env" + mock_subprocess.check_output.side_effect = RuntimeError + exitcode, output = attacker_baremetal._execute_shell_command(cmd) + self.assertEqual(exitcode, -1) + + +@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh') +class AttackerBaremetalTestCase(unittest.TestCase): + + def setUp(self): + host = { + "ipmi_ip": "10.20.0.5", + "ipmi_user": "root", + "ipmi_pwd": "123456", + "ip": "10.20.0.5", + "user": "root", + "key_filename": "/root/.ssh/id_rsa" + } + self.context = {"node1": host} + self.attacker_cfg = { + 'fault_type': 'bear-metal-down', + 'host': 'node1', + } + + def test__attacker_baremetal_all_successful(self, mock_ssh): + + ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context) + + mock_ssh.SSH().execute.return_value = (0, "running", '') + ins.setup() + ins.inject_fault() + ins.recover() + + def test__attacker_baremetal_check_failuer(self, mock_ssh): + + ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context) + mock_ssh.SSH().execute.return_value = (0, "error check", '') + ins.setup() + + def test__attacker_baremetal_recover_successful(self, mock_ssh): + + self.attacker_cfg["jump_host"] = 'node1' + self.context["node1"]["pwd"] = "123456" + ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context) + + mock_ssh.SSH().execute.return_value = (0, "running", '') + ins.setup() + ins.recover() diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py new file mode 100644 index 000000000..eb0cce70d --- /dev/null +++ b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_process + +import mock +import unittest + +from yardstick.benchmark.scenarios.availability.attacker import baseattacker + +@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_process.ssh') +class AttackerServiceTestCase(unittest.TestCase): + + def setUp(self): + host = { + "ip": "10.20.0.5", + "user": "root", + "key_filename": "/root/.ssh/id_rsa" + } + self.context = {"node1": host} + self.attacker_cfg = { + 'fault_type': 'kill-process', + 'process_name': 'nova-api', + 'host': 'node1', + } + + def test__attacker_service_all_successful(self, mock_ssh): + + cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg) + ins = cls(self.attacker_cfg, self.context) + + mock_ssh.SSH().execute.return_value = (0, "running", '') + ins.setup() + ins.inject_fault() + ins.recover() + + def test__attacker_service_check_failuer(self, mock_ssh): + + cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg) + ins = cls(self.attacker_cfg, self.context) + + mock_ssh.SSH().execute.return_value = (0, "error check", '') + ins.setup() diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py new file mode 100644 index 000000000..13295273b --- /dev/null +++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command + +import mock +import unittest + +from yardstick.benchmark.scenarios.availability.monitor import basemonitor + + +@mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.BaseMonitor') +class MonitorMgrTestCase(unittest.TestCase): + + def setUp(self): + config = { + 'monitor_type': 'openstack-api', + } + + self.monitor_configs = [] + self.monitor_configs.append(config) + + def test__MonitorMgr_setup_successful(self, mock_monitor): + instance = basemonitor.MonitorMgr() + instance.init_monitors(self.monitor_configs, None) + instance.start_monitors() + instance.wait_monitors() + + ret = instance.verify_SLA() + +class BaseMonitorTestCase(unittest.TestCase): + + class MonitorSimple(basemonitor.BaseMonitor): + __monitor_type__ = "MonitorForTest" + def setup(self): + self.monitor_result = False + + def monitor_func(self): + return self.monitor_result + + def setUp(self): + self.monitor_cfg = { + 'monitor_type': 'MonitorForTest', + 'command_name': 'nova image-list', + 'monitor_time': 0.01, + 'sla': {'max_outage_time': 5} + } + + def test__basemonitor_start_wait_successful(self): + ins = basemonitor.BaseMonitor(self.monitor_cfg, None) + ins.start_monitor() + ins.wait_monitor() + + + def test__basemonitor_all_successful(self): + ins = self.MonitorSimple(self.monitor_cfg, None) + ins.setup() + ins.run() + ins.verify_SLA() + + @mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.multiprocessing') + def test__basemonitor_func_false(self, mock_multiprocess): + ins = self.MonitorSimple(self.monitor_cfg, None) + ins.setup() + mock_multiprocess.Event().is_set.return_value = False + ins.run() + ins.verify_SLA() + + def test__basemonitor_getmonitorcls_successfule(self): + cls = None + try: + cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg) + except Exception: + pass + self.assertIsNone(cls) + diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor.py b/tests/unit/benchmark/scenarios/availability/test_monitor.py deleted file mode 100644 index 793871ca3..000000000 --- a/tests/unit/benchmark/scenarios/availability/test_monitor.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python - -############################################################################## -# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -# Unittest for yardstick.benchmark.scenarios.availability.monitor - -import mock -import unittest - -from yardstick.benchmark.scenarios.availability import monitor - -@mock.patch('yardstick.benchmark.scenarios.availability.monitor.subprocess') -class MonitorTestCase(unittest.TestCase): - - def test__fun_execute_shell_command_successful(self, mock_subprocess): - cmd = "env" - mock_subprocess.check_output.return_value = (0, 'unittest') - exitcode, output = monitor._execute_shell_command(cmd) - self.assertEqual(exitcode, 0) - - def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess): - cmd = "env" - mock_subprocess.check_output.side_effect = RuntimeError - exitcode, output = monitor._execute_shell_command(cmd) - self.assertEqual(exitcode, -1) - - def test__fun_monitor_process_successful(self, mock_subprocess): - config = { - 'monitor_cmd':'env', - 'duration':0 - } - mock_queue = mock.Mock() - mock_event = mock.Mock() - - mock_subprocess.check_output.return_value = (0, 'unittest') - monitor._monitor_process(config, mock_queue, mock_event) - - def test__fun_monitor_process_fail_cmd_execute_error(self, mock_subprocess): - config = { - 'monitor_cmd':'env', - 'duration':0 - } - mock_queue = mock.Mock() - mock_event = mock.Mock() - - mock_subprocess.check_output.side_effect = RuntimeError - monitor._monitor_process(config, mock_queue, mock_event) - - def test__fun_monitor_process_fail_no_monitor_cmd(self, mock_subprocess): - config = { - 'duration':0 - } - mock_queue = mock.Mock() - mock_event = mock.Mock() - - mock_subprocess.check_output.return_value = (-1, 'unittest') - monitor._monitor_process(config, mock_queue, mock_event) - - @mock.patch('yardstick.benchmark.scenarios.availability.monitor.multiprocessing') - def test_monitor_all_successful(self, mock_multip, mock_subprocess): - config = { - 'monitor_cmd':'env', - 'duration':0 - } - p = monitor.Monitor() - p.setup(config) - mock_multip.Queue().get.return_value = 'started' - p.start() - - result = "monitor unitest" - mock_multip.Queue().get.return_value = result - p.stop() - - ret = p.get_result() - - self.assertEqual(result, ret) diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py new file mode 100644 index 000000000..c8cda7dc7 --- /dev/null +++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command + +import mock +import unittest + +from yardstick.benchmark.scenarios.availability.monitor import monitor_command + +@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess') +class ExecuteShellTestCase(unittest.TestCase): + + def test__fun_execute_shell_command_successful(self, mock_subprocess): + cmd = "env" + mock_subprocess.check_output.return_value = (0, 'unittest') + exitcode, output = monitor_command._execute_shell_command(cmd) + self.assertEqual(exitcode, 0) + + def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess): + cmd = "env" + mock_subprocess.check_output.side_effect = RuntimeError + exitcode, output = monitor_command._execute_shell_command(cmd) + self.assertEqual(exitcode, -1) + +@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess') +class MonitorOpenstackCmdTestCase(unittest.TestCase): + + def setUp(self): + host = { + "ip": "10.20.0.5", + "user": "root", + "key_filename": "/root/.ssh/id_rsa" + } + self.context = {"node1": host} + self.config = { + 'monitor_type': 'openstack-api', + 'command_name': 'nova image-list', + 'monitor_time': 1, + 'sla': {'max_outage_time': 5} + } + + + def test__monitor_command_monitor_func_successful(self, mock_subprocess): + + instance = monitor_command.MonitorOpenstackCmd(self.config, None) + instance.setup() + mock_subprocess.check_output.return_value = (0, 'unittest') + ret = instance.monitor_func() + self.assertEqual(ret, True) + instance._result = {"outage_time": 0} + instance.verify_SLA() + + def test__monitor_command_monitor_func_failure(self, mock_subprocess): + mock_subprocess.check_output.return_value = (1, 'unittest') + instance = monitor_command.MonitorOpenstackCmd(self.config, None) + instance.setup() + mock_subprocess.check_output.side_effect = RuntimeError + ret = instance.monitor_func() + self.assertEqual(ret, False) + instance._result = {"outage_time": 10} + instance.verify_SLA() + + @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.ssh') + def test__monitor_command_ssh_monitor_successful(self, mock_ssh, mock_subprocess): + + self.config["host"] = "node1" + instance = monitor_command.MonitorOpenstackCmd(self.config, self.context) + instance.setup() + mock_ssh.SSH().execute.return_value = (0, "0", '') + ret = instance.monitor_func() diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py new file mode 100644 index 000000000..dda104b4e --- /dev/null +++ b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_process + +import mock +import unittest + +from yardstick.benchmark.scenarios.availability.monitor import monitor_process + +@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_process.ssh') +class MonitorProcessTestCase(unittest.TestCase): + + def setUp(self): + host = { + "ip": "10.20.0.5", + "user": "root", + "key_filename": "/root/.ssh/id_rsa" + } + self.context = {"node1": host} + self.monitor_cfg = { + 'monitor_type': 'process', + 'process_name': 'nova-api', + 'host': "node1", + 'monitor_time': 1, + 'sla': {'max_recover_time': 5} + } + + def test__monitor_process_all_successful(self, mock_ssh): + + ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context) + + mock_ssh.SSH().execute.return_value = (0, "1", '') + ins.setup() + ins.monitor_func() + ins._result = {"outage_time": 0} + ins.verify_SLA() + + def test__monitor_process_down_failuer(self, mock_ssh): + + ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context) + + mock_ssh.SSH().execute.return_value = (0, "0", '') + ins.setup() + ins.monitor_func() + ins._result = {"outage_time": 10} + ins.verify_SLA() + diff --git a/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/tests/unit/benchmark/scenarios/availability/test_serviceha.py index 861bacdc9..6e58b6e7a 100644 --- a/tests/unit/benchmark/scenarios/availability/test_serviceha.py +++ b/tests/unit/benchmark/scenarios/availability/test_serviceha.py @@ -16,138 +16,58 @@ import unittest from yardstick.benchmark.scenarios.availability import serviceha -@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.ssh') +@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.basemonitor') +@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.baseattacker') class ServicehaTestCase(unittest.TestCase): def setUp(self): - self.args = { - 'options':{ - 'component':'nova-api', - 'fault_type':'stop-service', - 'fault_time':0 - }, - 'sla':{ - 'outage_time':'2' - } + host = { + "ip": "10.20.0.5", + "user": "root", + "key_filename": "/root/.ssh/id_rsa" } - self.ctx = { - 'host': { - 'ip': '10.20.0.3', - 'user': 'cirros', - 'key_filename': 'mykey.key' - } + self.ctx = {"nodes": {"node1": host}} + attacker_cfg = { + "fault_type": "kill-process", + "process_name": "nova-api", + "host": "node1" } + attacker_cfgs = [] + attacker_cfgs.append(attacker_cfg) + monitor_cfg = { + "monitor_cmd": "nova image-list", + "monitor_time": 0.1 + } + monitor_cfgs = [] + monitor_cfgs.append(monitor_cfg) - def test__serviceha_setup_successful(self, mock_ssh): - p = serviceha.ServiceHA(self.args, self.ctx) - mock_ssh.SSH().execute.return_value = (0, 'running', '') - p.setup() - - self.assertEqual(p.setup_done, True) - - def test__serviceha_setup_fail_service(self, mock_ssh): - - self.args['options']['component'] = 'error' - p = serviceha.ServiceHA(self.args, self.ctx) - mock_ssh.SSH().execute.return_value = (0, 'running', '') - p.setup() - - self.assertEqual(p.setup_done, False) - - def test__serviceha_setup_fail_fault_type(self, mock_ssh): - - self.args['options']['fault_type'] = 'error' - p = serviceha.ServiceHA(self.args, self.ctx) - 
mock_ssh.SSH().execute.return_value = (0, 'running', '') - p.setup() - - self.assertEqual(p.setup_done, False) - - def test__serviceha_setup_fail_check(self, mock_ssh): - - p = serviceha.ServiceHA(self.args, self.ctx) - mock_ssh.SSH().execute.return_value = (0, 'error', '') - p.setup() - - self.assertEqual(p.setup_done, False) - - def test__serviceha_setup_fail_script(self, mock_ssh): + options = { + "attackers": attacker_cfgs, + "monitors": monitor_cfgs + } + sla = {"outage_time": 5} + self.args = {"options": options, "sla": sla} + def test__serviceha_setup_run_successful(self, mock_attacker, mock_monitor): p = serviceha.ServiceHA(self.args, self.ctx) - mock_ssh.SSH().execute.return_value = (-1, 'false', '') - - self.assertRaises(RuntimeError, p.setup) - self.assertEqual(p.setup_done, False) - - @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor') - def test__serviceha_run_successful(self, mock_monitor, mock_ssh): - p = serviceha.ServiceHA(self.args, self.ctx) - mock_ssh.SSH().execute.return_value = (0, 'running', '') p.setup() - - monitor_result = {'total_time': 5, 'outage_time': 0, 'total_count': 16, 'outage_count': 0} - mock_monitor.Monitor().get_result.return_value = monitor_result - - p.connection = mock_ssh.SSH() - mock_ssh.SSH().execute.return_value = (0, 'success', '') - - result = {} - p.run(result) - self.assertEqual(result,{ 'outage_time': 0}) - - def test__serviceha_run_fail_nosetup(self, mock_ssh): - p = serviceha.ServiceHA(self.args, self.ctx) - p.run(None) - - @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor') - def test__serviceha_run_fail_script(self, mock_monitor, mock_ssh): + self.assertEqual(p.setup_done, True) + mock_monitor.MonitorMgr().verify_SLA.return_value = True + ret = {} + p.run(ret) + p.teardown() +""" + def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor): p = serviceha.ServiceHA(self.args, self.ctx) - mock_ssh.SSH().execute.return_value = (0, 'running', '') - p.setup() - - monitor_result = {'total_time': 5, 'outage_time': 0, 'total_count': 16, 'outage_count': 0} - mock_monitor.Monitor().get_result.return_value = monitor_result - p.connection = mock_ssh.SSH() - mock_ssh.SSH().execute.return_value = (-1, 'error', '') - - result = {} - self.assertRaises(RuntimeError, p.run, result) - - @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor') - def test__serviceha_run_fail_sla(self, mock_monitor, mock_ssh): - p = serviceha.ServiceHA(self.args, self.ctx) - mock_ssh.SSH().execute.return_value = (0, 'running', '') p.setup() - - monitor_result = {'total_time': 10, 'outage_time': 5, 'total_count': 16, 'outage_count': 0} - mock_monitor.Monitor().get_result.return_value = monitor_result - - p.connection = mock_ssh.SSH() - mock_ssh.SSH().execute.return_value = (0, 'success', '') + self.assertEqual(p.setup_done, True) result = {} - self.assertRaises(AssertionError, p.run, result) - - def test__serviceha_teardown_successful(self, mock_ssh): - p = serviceha.ServiceHA(self.args, self.ctx) - mock_ssh.SSH().execute.return_value = (0, 'running', '') - p.setup() - p.need_teardown = True - - mock_ssh.SSH().execute.return_value = (0, 'success', '') - p.teardown() - - self.assertEqual(p.need_teardown, False) - - def test__serviceha_teardown_fail_script(self, mock_ssh): - p = serviceha.ServiceHA(self.args, self.ctx) - mock_ssh.SSH().execute.return_value = (0, 'running', '') - p.setup() - p.need_teardown = True - - mock_ssh.SSH().execute.return_value = (-1, 'false', '') - - 
self.assertRaises(RuntimeError, p.teardown) + result["outage_time"] = 10 + mock_monitor.Monitor().get_result.return_value = result + ret = {} + self.assertRaises(AssertionError, p.run, ret) +""" diff --git a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py index a87b39142..807429025 100644 --- a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py +++ b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py @@ -22,41 +22,65 @@ from yardstick.benchmark.scenarios.compute import cyclictest class CyclictestTestCase(unittest.TestCase): def setUp(self): - self.ctx = { + self.scenario_cfg = { + "host": "kvm.LF", + "setup_options": { + "rpm_dir": "/opt/rpm", + "host_setup_seqs": [ + "host-setup0.sh", + "host-setup1.sh", + "host-run-qemu.sh" + ], + "script_dir": "/opt/scripts", + "image_dir": "/opt/image", + "guest_setup_seqs": [ + "guest-setup0.sh", + "guest-setup1.sh" + ] + }, + "sla": { + "action": "monitor", + "max_min_latency": 50, + "max_avg_latency": 100, + "max_max_latency": 1000 + }, + "options": { + "priority": 99, + "threads": 1, + "loops": 1000, + "affinity": 1, + "interval": 1000, + "histogram": 90 + } + } + self.context_cfg = { "host": { - "ip": "192.168.50.28", - "user": "root", - "key_filename": "mykey.key" + "ip": "10.229.43.154", + "key_filename": "/yardstick/resources/files/yardstick_key", + "role": "BareMetal", + "name": "kvm.LF", + "user": "root" } } def test_cyclictest_successful_setup(self, mock_ssh): - c = cyclictest.Cyclictest({}, self.ctx) - c.setup() - + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) mock_ssh.SSH().execute.return_value = (0, '', '') - self.assertIsNotNone(c.client) + + c.setup() + self.assertIsNotNone(c.guest) + self.assertIsNotNone(c.host) self.assertEqual(c.setup_done, True) def test_cyclictest_successful_no_sla(self, mock_ssh): - - options = { - "affinity": 2, - "interval": 100, - "priority": 88, - "loops": 10000, - "threads": 2, - "histogram": 80 - } - args = { - "options": options, - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.pop("sla", None) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() - + c.guest = mock_ssh.SSH() sample_output = '{"min": 100, "avg": 500, "max": 1000}' mock_ssh.SSH().execute.return_value = (0, sample_output, '') @@ -65,29 +89,19 @@ class CyclictestTestCase(unittest.TestCase): self.assertEqual(result, expected_result) def test_cyclictest_successful_sla(self, mock_ssh): - - options = { - "affinity": 2, - "interval": 100, - "priority": 88, - "loops": 10000, - "threads": 2, - "histogram": 80 - } - sla = { - "max_min_latency": 100, - "max_avg_latency": 500, - "max_max_latency": 1000, - } - args = { - "options": options, - "sla": sla - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.update({"sla": { + "action": "monitor", + "max_min_latency": 100, + "max_avg_latency": 500, + "max_max_latency": 1000 + } + }) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() - + c.guest = mock_ssh.SSH() sample_output = '{"min": 100, "avg": 500, "max": 1000}' mock_ssh.SSH().execute.return_value = (0, sample_output, '') @@ -97,14 +111,13 @@ class CyclictestTestCase(unittest.TestCase): def test_cyclictest_unsuccessful_sla_min_latency(self, mock_ssh): - args = { - "options": {}, - "sla": 
{"max_min_latency": 10} - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.update({"sla": {"max_min_latency": 10}}) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() + c.guest = mock_ssh.SSH() sample_output = '{"min": 100, "avg": 500, "max": 1000}' mock_ssh.SSH().execute.return_value = (0, sample_output, '') @@ -112,14 +125,13 @@ class CyclictestTestCase(unittest.TestCase): def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh): - args = { - "options": {}, - "sla": {"max_avg_latency": 10} - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.update({"sla": {"max_avg_latency": 10}}) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() + c.guest = mock_ssh.SSH() sample_output = '{"min": 100, "avg": 500, "max": 1000}' mock_ssh.SSH().execute.return_value = (0, sample_output, '') @@ -127,14 +139,13 @@ class CyclictestTestCase(unittest.TestCase): def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh): - args = { - "options": {}, - "sla": {"max_max_latency": 10} - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.update({"sla": {"max_max_latency": 10}}) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() + c.guest = mock_ssh.SSH() sample_output = '{"min": 100, "avg": 500, "max": 1000}' mock_ssh.SSH().execute.return_value = (0, sample_output, '') @@ -142,27 +153,13 @@ class CyclictestTestCase(unittest.TestCase): def test_cyclictest_unsuccessful_script_error(self, mock_ssh): - options = { - "affinity": 2, - "interval": 100, - "priority": 88, - "loops": 10000, - "threads": 2, - "histogram": 80 - } - sla = { - "max_min_latency": 100, - "max_avg_latency": 500, - "max_max_latency": 1000, - } - args = { - "options": options, - "sla": sla - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.update({"sla": {"max_max_latency": 10}}) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() + c.guest = mock_ssh.SSH() mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR') self.assertRaises(RuntimeError, c.run, result) diff --git a/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/tests/unit/benchmark/scenarios/compute/test_unixbench.py new file mode 100644 index 000000000..0935bcad2 --- /dev/null +++ b/tests/unit/benchmark/scenarios/compute/test_unixbench.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.compute.unixbench.Unixbench + +import mock +import unittest +import json + +from yardstick.benchmark.scenarios.compute import unixbench + + +@mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh') +class UnixbenchTestCase(unittest.TestCase): + + def setUp(self): + self.ctx = { + "host": { + "ip": "192.168.50.28", + "user": "root", + "key_filename": "mykey.key" + } + } + + def test_unixbench_successful_setup(self, mock_ssh): + + u = unixbench.Unixbench({}, self.ctx) + u.setup() + + mock_ssh.SSH().execute.return_value = (0, '', '') + self.assertIsNotNone(u.client) + self.assertEqual(u.setup_done, True) + + def test_unixbench_successful_no_sla(self, mock_ssh): + + options = { + "test_type": 'dhry2reg', + "run_mode": 'verbose' + } + args = { + "options": options, + } + u = unixbench.Unixbench(args, self.ctx) + result = {} + + u.server = mock_ssh.SSH() + + sample_output = '{"Score":"4425.4"}' + mock_ssh.SSH().execute.return_value = (0, sample_output, '') + + u.run(result) + expected_result = json.loads(sample_output) + self.assertEqual(result, expected_result) + + def test_unixbench_successful_in_quiet_mode(self, mock_ssh): + + options = { + "test_type": 'dhry2reg', + "run_mode": 'quiet', + "copies":1 + } + args = { + "options": options, + } + u = unixbench.Unixbench(args, self.ctx) + result = {} + + u.server = mock_ssh.SSH() + + sample_output = '{"Score":"4425.4"}' + mock_ssh.SSH().execute.return_value = (0, sample_output, '') + + u.run(result) + expected_result = json.loads(sample_output) + self.assertEqual(result, expected_result) + + + def test_unixbench_successful_sla(self, mock_ssh): + + options = { + "test_type": 'dhry2reg', + "run_mode": 'verbose' + } + sla = { + "single_score": '100', + "parallel_score": '500' + } + args = { + "options": options, + "sla": sla + } + u = unixbench.Unixbench(args, self.ctx) + result = {} + + u.server = mock_ssh.SSH() + + sample_output = '{"signle_score":"2251.7","parallel_score":"4395.9"}' + mock_ssh.SSH().execute.return_value = (0, sample_output, '') + + u.run(result) + expected_result = json.loads(sample_output) + self.assertEqual(result, expected_result) + + def test_unixbench_unsuccessful_sla_single_score(self, mock_ssh): + + args = { + "options": {}, + "sla": {"single_score": "500"} + } + u = unixbench.Unixbench(args, self.ctx) + result = {} + + u.server = mock_ssh.SSH() + sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}' + + mock_ssh.SSH().execute.return_value = (0, sample_output, '') + self.assertRaises(AssertionError, u.run, result) + + def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh): + + args = { + "options": {}, + "sla": {"parallel_score": "4000"} + } + u = unixbench.Unixbench(args, self.ctx) + result = {} + + u.server = mock_ssh.SSH() + sample_output = '{"signle_score":"2251.7","parallel_score":"3395.9"}' + + mock_ssh.SSH().execute.return_value = (0, sample_output, '') + self.assertRaises(AssertionError, u.run, result) + + def test_unixbench_unsuccessful_script_error(self, mock_ssh): + + options = { + "test_type": 'dhry2reg', + "run_mode": 'verbose' + } + sla = { + "single_score": '100', + "parallel_score": '500' + } + args = { + "options": options, + 
"sla": sla + } + u = unixbench.Unixbench(args, self.ctx) + result = {} + + u.server = mock_ssh.SSH() + + mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR') + self.assertRaises(RuntimeError, u.run, result) + + +def main(): + unittest.main() + +if __name__ == '__main__': + main() diff --git a/tests/unit/benchmark/scenarios/dummy/__init__.py b/tests/unit/benchmark/scenarios/dummy/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/tests/unit/benchmark/scenarios/dummy/__init__.py diff --git a/tests/unit/benchmark/scenarios/dummy/test_dummy.py b/tests/unit/benchmark/scenarios/dummy/test_dummy.py new file mode 100644 index 000000000..1f9b729a9 --- /dev/null +++ b/tests/unit/benchmark/scenarios/dummy/test_dummy.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.dummy.dummy + +import unittest + +from yardstick.benchmark.scenarios.dummy import dummy + + +class DummyTestCase(unittest.TestCase): + + def setUp(self): + self.test_context = dummy.Dummy(None, None) + + self.assertIsNone(self.test_context.scenario_cfg) + self.assertIsNone(self.test_context.context_cfg) + self.assertEqual(self.test_context.setup_done, False) + + def test_run(self): + result = {} + self.test_context.run(result) + + self.assertEqual(result["hello"], "yardstick") + self.assertEqual(self.test_context.setup_done, True) diff --git a/tests/unit/benchmark/scenarios/networking/test_ping6.py b/tests/unit/benchmark/scenarios/networking/test_ping6.py new file mode 100644 index 000000000..662b85c30 --- /dev/null +++ b/tests/unit/benchmark/scenarios/networking/test_ping6.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# +# All rights reserved. 
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping6.py b/tests/unit/benchmark/scenarios/networking/test_ping6.py
new file mode 100644
index 000000000..662b85c30
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_ping6.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.ping6.Ping6
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import ping6
+
+
+class PingTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ctx = {
+            'host': {
+                'ip': '172.16.0.137',
+                'user': 'cirros',
+                'key_filename': 'mykey.key',
+                'password': 'root'
+            },
+        }
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_successful_setup(self, mock_ssh):
+
+        p = ping6.Ping6({}, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '0', '')
+        p.setup()
+
+        self.assertEqual(p.setup_done, True)
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_successful_no_sla(self, mock_ssh):
+
+        result = {}
+
+        p = ping6.Ping6({}, self.ctx)
+        p.client = mock_ssh.SSH()
+        mock_ssh.SSH().execute.return_value = (0, '100', '')
+        p.run(result)
+        self.assertEqual(result, {'rtt': 100.0})
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_successful_sla(self, mock_ssh):
+
+        args = {
+            'sla': {'max_rtt': 150}
+        }
+        result = {}
+
+        p = ping6.Ping6(args, self.ctx)
+        p.client = mock_ssh.SSH()
+        mock_ssh.SSH().execute.return_value = (0, '100', '')
+        p.run(result)
+        self.assertEqual(result, {'rtt': 100.0})
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_unsuccessful_sla(self, mock_ssh):
+
+        args = {
+            'options': {'packetsize': 200},
+            'sla': {'max_rtt': 50}
+        }
+        result = {}
+
+        p = ping6.Ping6(args, self.ctx)
+        p.client = mock_ssh.SSH()
+        mock_ssh.SSH().execute.return_value = (0, '100', '')
+        self.assertRaises(AssertionError, p.run, result)
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_unsuccessful_script_error(self, mock_ssh):
+
+        args = {
+            'options': {'packetsize': 200},
+            'sla': {'max_rtt': 50}
+        }
+        result = {}
+
+        p = ping6.Ping6(args, self.ctx)
+        p.client = mock_ssh.SSH()
+        mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+        self.assertRaises(RuntimeError, p.run, result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
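Taken together, the run() tests imply a simple contract: the remote command's stdout parses to a float rtt, an optional max_rtt SLA is enforced with a plain assert, and a non-zero exit status surfaces as RuntimeError. A sketch of that logic under those assumptions (names are illustrative, not the scenario's source):

def _parse_and_check(status, stdout, stderr, sla):
    # Non-zero exit status from the remote ping command is fatal.
    if status:
        raise RuntimeError(stderr)
    rtt = float(stdout)  # the tests mock stdout as '100'
    if sla:
        assert rtt <= float(sla['max_rtt']), 'rtt %f exceeds max_rtt' % rtt
    return {'rtt': rtt}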
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
new file mode 100644
index 000000000..418dd39e6
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation
+
+
+class VtcInstantiationValidationTestCase(unittest.TestCase):
+
+    def setUp(self):
+        scenario = dict()
+        scenario['options'] = dict()
+        scenario['options']['default_net_name'] = ''
+        scenario['options']['default_subnet_name'] = ''
+        scenario['options']['vlan_net_1_name'] = ''
+        scenario['options']['vlan_subnet_1_name'] = ''
+        scenario['options']['vlan_net_2_name'] = ''
+        scenario['options']['vlan_subnet_2_name'] = ''
+        scenario['options']['vnic_type'] = ''
+        scenario['options']['vtc_flavor'] = ''
+        scenario['options']['packet_size'] = ''
+        scenario['options']['vlan_sender'] = ''
+        scenario['options']['vlan_receiver'] = ''
+
+        self.vt = vtc_instantiation_validation.VtcInstantiationValidation(scenario, '')
+
+    def test_run_for_success(self):
+        result = {}
+        self.vt.run(result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
new file mode 100644
index 000000000..e0a46241c
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation_noisy
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation_noisy
+
+
+class VtcInstantiationValidationNoisyTestCase(unittest.TestCase):
+
+    def setUp(self):
+        scenario = dict()
+        scenario['options'] = dict()
+        scenario['options']['default_net_name'] = ''
+        scenario['options']['default_subnet_name'] = ''
+        scenario['options']['vlan_net_1_name'] = ''
+        scenario['options']['vlan_subnet_1_name'] = ''
+        scenario['options']['vlan_net_2_name'] = ''
+        scenario['options']['vlan_subnet_2_name'] = ''
+        scenario['options']['vnic_type'] = ''
+        scenario['options']['vtc_flavor'] = ''
+        scenario['options']['packet_size'] = ''
+        scenario['options']['vlan_sender'] = ''
+        scenario['options']['vlan_receiver'] = ''
+        scenario['options']['num_of_neighbours'] = '1'
+        scenario['options']['amount_of_ram'] = '1G'
+        scenario['options']['number_of_cores'] = '1'
+
+        self.vt = vtc_instantiation_validation_noisy.VtcInstantiationValidationNoisy(scenario, '')
+
+    def test_run_for_success(self):
+        result = {}
+        self.vt.run(result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
new file mode 100644
index 000000000..ecdf555d2
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput.VtcThroughput
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_throughput
+
+
+class VtcThroughputTestCase(unittest.TestCase):
+
+    def setUp(self):
+        scenario = dict()
+        scenario['options'] = dict()
+        scenario['options']['default_net_name'] = ''
+        scenario['options']['default_subnet_name'] = ''
+        scenario['options']['vlan_net_1_name'] = ''
+        scenario['options']['vlan_subnet_1_name'] = ''
+        scenario['options']['vlan_net_2_name'] = ''
+        scenario['options']['vlan_subnet_2_name'] = ''
+        scenario['options']['vnic_type'] = ''
+        scenario['options']['vtc_flavor'] = ''
+        scenario['options']['packet_size'] = ''
+        scenario['options']['vlan_sender'] = ''
+        scenario['options']['vlan_receiver'] = ''
+
+        self.vt = vtc_throughput.VtcThroughput(scenario, '')
+
+    def test_run_for_success(self):
+        result = {}
+        self.vt.run(result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
new file mode 100644
index 000000000..98957b1de
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput_noisy.VtcThroughputNoisy
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_throughput_noisy
+
+
+class VtcThroughputNoisyTestCase(unittest.TestCase):
+
+    def setUp(self):
+        scenario = dict()
+        scenario['options'] = dict()
+        scenario['options']['default_net_name'] = ''
+        scenario['options']['default_subnet_name'] = ''
+        scenario['options']['vlan_net_1_name'] = ''
+        scenario['options']['vlan_subnet_1_name'] = ''
+        scenario['options']['vlan_net_2_name'] = ''
+        scenario['options']['vlan_subnet_2_name'] = ''
+        scenario['options']['vnic_type'] = ''
+        scenario['options']['vtc_flavor'] = ''
+        scenario['options']['packet_size'] = ''
+        scenario['options']['vlan_sender'] = ''
+        scenario['options']['vlan_receiver'] = ''
+        scenario['options']['num_of_neighbours'] = '1'
+        scenario['options']['amount_of_ram'] = '1G'
+        scenario['options']['number_of_cores'] = '1'
+
+        self.vt = vtc_throughput_noisy.VtcThroughputNoisy(scenario, '')
+
+    def test_run_for_success(self):
+        result = {}
+        self.vt.run(result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
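The four vtc test modules above repeat the same setUp() options dictionary. A hypothetical helper, not part of this patch, shows how the duplication could be collapsed; every name below is illustrative:

def make_vtc_scenario(noisy=False):
    # Build the blank options dict shared by all four vtc test cases.
    keys = ('default_net_name', 'default_subnet_name',
            'vlan_net_1_name', 'vlan_subnet_1_name',
            'vlan_net_2_name', 'vlan_subnet_2_name',
            'vnic_type', 'vtc_flavor', 'packet_size',
            'vlan_sender', 'vlan_receiver')
    options = dict((key, '') for key in keys)
    if noisy:
        # The *_noisy variants add background-load parameters.
        options.update(num_of_neighbours='1',
                       amount_of_ram='1G',
                       number_of_cores='1')
    return {'options': options}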
diff --git a/tests/unit/benchmark/scenarios/parser/__init__.py b/tests/unit/benchmark/scenarios/parser/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/parser/__init__.py
diff --git a/tests/unit/benchmark/scenarios/parser/test_parser.py b/tests/unit/benchmark/scenarios/parser/test_parser.py
new file mode 100644
index 000000000..d11a6d5c8
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/parser/test_parser.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.parser.Parser
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.parser import parser
+
+@mock.patch('yardstick.benchmark.scenarios.parser.parser.subprocess')
+class ParserTestCase(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def test_parser_successful_setup(self, mock_subprocess):
+
+        p = parser.Parser({}, {})
+        mock_subprocess.call.return_value = 0
+        p.setup()
+        self.assertEqual(p.setup_done, True)
+
+    def test_parser_successful(self, mock_subprocess):
+        args = {
+            'options': {'yangfile': '/root/yardstick/samples/yang.yaml',
+                        'toscafile': '/root/yardstick/samples/tosca.yaml'},
+        }
+        p = parser.Parser(args, {})
+        result = {}
+        mock_subprocess.call.return_value = 0
+        sample_output = '{"yangtotosca": "success"}'
+        p.run(result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(result, expected_result)
+
+    def test_parser_teardown_successful(self, mock_subprocess):
+
+        p = parser.Parser({}, {})
+        mock_subprocess.call.return_value = 0
+        p.teardown()
+        self.assertEqual(p.teardown_done, True)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
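A subtlety worth calling out in ParserTestCase is how the patched subprocess module is configured: setting mock_subprocess.call.return_value makes subprocess.call(...) return 0, whereas setting mock_subprocess.call().return_value only configures the nested mock returned by that extra call. A self-contained demonstration with mock alone:

import mock

m = mock.MagicMock()
m.call().return_value = 0       # configures the wrong object
assert m.call(['true']) != 0    # the call still returns a MagicMock

m = mock.MagicMock()
m.call.return_value = 0         # configures call itself
assert m.call(['true']) == 0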
diff --git a/tests/unit/dispatcher/__init__.py b/tests/unit/dispatcher/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/dispatcher/__init__.py
diff --git a/tests/unit/dispatcher/test_influxdb.py b/tests/unit/dispatcher/test_influxdb.py
new file mode 100644
index 000000000..5553c86a9
--- /dev/null
+++ b/tests/unit/dispatcher/test_influxdb.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.dispatcher.influxdb
+
+import mock
+import unittest
+
+from yardstick.dispatcher.influxdb import InfluxdbDispatcher
+
+class InfluxdbDispatcherTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.data1 = {
+            "runner_id": 8921,
+            "context_cfg": {
+                "host": {
+                    "ip": "10.229.43.154",
+                    "key_filename": "/root/yardstick/yardstick/resources/files/yardstick_key",
+                    "name": "kvm.LF",
+                    "user": "root"
+                },
+                "target": {
+                    "ipaddr": "10.229.44.134"
+                }
+            },
+            "scenario_cfg": {
+                "runner": {
+                    "interval": 1,
+                    "object": "yardstick.benchmark.scenarios.networking.ping.Ping",
+                    "output_filename": "/tmp/yardstick.out",
+                    "runner_id": 8921,
+                    "duration": 10,
+                    "type": "Duration"
+                },
+                "host": "kvm.LF",
+                "type": "Ping",
+                "target": "10.229.44.134",
+                "sla": {
+                    "action": "monitor",
+                    "max_rtt": 10
+                },
+                "tc": "ping",
+                "task_id": "ea958583-c91e-461a-af14-2a7f9d7f79e7"
+            }
+        }
+        self.data2 = {
+            "benchmark": {
+                "timestamp": "1451478117.883505",
+                "errors": "",
+                "data": {
+                    "rtt": 0.613
+                },
+                "sequence": 1
+            },
+            "runner_id": 8921
+        }
+        self.data3 = {
+            "benchmark": {
+                "data": {
+                    "mpstat": {
+                        "cpu0": {
+                            "%sys": "0.00",
+                            "%idle": "99.00"
+                        },
+                        "loadavg": [
+                            "1.09",
+                            "0.29"
+                        ]
+                    },
+                    "rtt": "1.03"
+                }
+            }
+        }
+
+    def test_record_result_data_no_target(self):
+        influxdb = InfluxdbDispatcher(None)
+        influxdb.target = ''
+        self.assertEqual(influxdb.record_result_data(self.data1), -1)
+
+    def test_record_result_data_no_case_name(self):
+        influxdb = InfluxdbDispatcher(None)
+        self.assertEqual(influxdb.record_result_data(self.data2), -1)
+
+    @mock.patch('yardstick.dispatcher.influxdb.requests')
+    def test_record_result_data(self, mock_requests):
+        type(mock_requests.post.return_value).status_code = 204
+        influxdb = InfluxdbDispatcher(None)
+        self.assertEqual(influxdb.record_result_data(self.data1), 0)
+        self.assertEqual(influxdb.record_result_data(self.data2), 0)
+        self.assertEqual(influxdb.flush_result_data(), 0)
+
+    def test__dict_key_flatten(self):
+        line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
+        influxdb = InfluxdbDispatcher(None)
+        flattened_data = influxdb._dict_key_flatten(self.data3['benchmark']['data'])
+        result = ",".join([k + "=" + v for k, v in flattened_data.items()])
+        self.assertEqual(result, line)
+
+    def test__get_nano_timestamp(self):
+        influxdb = InfluxdbDispatcher(None)
+        results = {'benchmark': {'timestamp': '1451461248.925574'}}
+        self.assertEqual(influxdb._get_nano_timestamp(results), '1451461248925574144')
+
+    @mock.patch('yardstick.dispatcher.influxdb.time')
+    def test__get_nano_timestamp_except(self, mock_time):
+        results = {}
+        influxdb = InfluxdbDispatcher(None)
+        mock_time.time.return_value = 1451461248.925574
+        self.assertEqual(influxdb._get_nano_timestamp(results), '1451461248925574144')
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
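The two _get_nano_timestamp() tests fix the expected conversion: seconds, taken from the result's timestamp string or from time.time() as a fallback, become a nanosecond string. A sketch consistent with both tests (illustrative; the dispatcher's real method may differ in detail):

import time

def _get_nano_timestamp(results):
    # Fall back to the current time when the benchmark record is absent.
    try:
        timestamp = float(results['benchmark']['timestamp'])
    except Exception:
        timestamp = time.time()
    return str(int(timestamp * 1000000000))

# IEEE-754 double rounding is why the expected value ends in ...574144
# rather than ...574000:
assert _get_nano_timestamp(
    {'benchmark': {'timestamp': '1451461248.925574'}}) == '1451461248925574144'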
diff --git a/tests/unit/dispatcher/test_influxdb_line_protocol.py b/tests/unit/dispatcher/test_influxdb_line_protocol.py
new file mode 100644
index 000000000..cb05bf4d2
--- /dev/null
+++ b/tests/unit/dispatcher/test_influxdb_line_protocol.py
@@ -0,0 +1,55 @@
+# Unittest for yardstick.dispatcher.influxdb_line_protocol
+
+# yardstick comment: this file is a modified copy of
+# influxdb-python/influxdb/tests/test_line_protocol.py
+
+import unittest
+from yardstick.dispatcher.influxdb_line_protocol import make_lines
+
+
+class TestLineProtocol(unittest.TestCase):
+
+    def test_make_lines(self):
+        data = {
+            "tags": {
+                "empty_tag": "",
+                "none_tag": None,
+                "integer_tag": 2,
+                "string_tag": "hello"
+            },
+            "points": [
+                {
+                    "measurement": "test",
+                    "fields": {
+                        "string_val": "hello!",
+                        "int_val": 1,
+                        "float_val": 1.1,
+                        "none_field": None,
+                        "bool_val": True,
+                    }
+                }
+            ]
+        }
+
+        self.assertEqual(
+            make_lines(data),
+            'test,integer_tag=2,string_tag=hello '
+            'bool_val=True,float_val=1.1,int_val=1i,string_val="hello!"\n'
+        )
+
+    def test_string_val_newline(self):
+        data = {
+            "points": [
+                {
+                    "measurement": "m1",
+                    "fields": {
+                        "multi_line": "line1\nline1\nline3"
+                    }
+                }
+            ]
+        }
+
+        self.assertEqual(
+            make_lines(data),
+            'm1 multi_line="line1\\nline1\\nline3"\n'
+        )
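test_string_val_newline pins the field-escaping rule: newlines inside a string field must survive as literal \n sequences inside a double-quoted value. A rough sketch of escaping consistent with that expectation (illustrative only; make_lines also handles tags, numeric fields, and key ordering):

def _escape_string_field(value):
    # Escape backslashes first, then quotes and newlines, and wrap the
    # result in double quotes, as in the m1 example above.
    value = value.replace('\\', '\\\\')
    value = value.replace('"', '\\"')
    value = value.replace('\n', '\\n')
    return '"%s"' % value

assert _escape_string_field('line1\nline1\nline3') == '"line1\\nline1\\nline3"'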