44 files changed, 1027 insertions(+), 502 deletions(-)
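Before the raw diff: the first two hunks (api/resources/v1/env.py and api/resources/v2/containers.py) replace the hard-failing lookup of `dispatcher_influxdb.target` with tolerant `.get()` lookups, so a partially filled yardstick.conf no longer raises KeyError and the Grafana datasource payload picks up whatever target and credentials the config provides. A minimal sketch of that pattern, using the stdlib configparser and a hypothetical config path in place of Yardstick's `utils.parse_ini_file(consts.CONF_FILE)`:

```python
# Sketch only: mirrors the defensive config handling introduced in the
# env.py / containers.py hunks below. The conf path and function name are
# illustrative; the real code reads consts.CONF_FILE via utils.parse_ini_file.
import configparser


def build_influxdb_datasource(conf_path="/etc/yardstick/yardstick.conf"):
    parser = configparser.ConfigParser()
    parser.read(conf_path)
    # Fall back to an empty section instead of raising KeyError when the
    # [dispatcher_influxdb] section is missing.
    if parser.has_section("dispatcher_influxdb"):
        influx_conf = dict(parser["dispatcher_influxdb"])
    else:
        influx_conf = {}
    return {
        "name": "yardstick",
        "type": "influxdb",
        "access": "proxy",
        "url": influx_conf.get("target", ""),
        "user": influx_conf.get("username", ""),
        "password": influx_conf.get("password", ""),
        "database": "yardstick",
        "basicAuth": True,
        "basicAuthUser": "admin",
    }
```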
diff --git a/api/resources/v1/env.py b/api/resources/v1/env.py index 04cc659c7..7c831fd74 100644 --- a/api/resources/v1/env.py +++ b/api/resources/v1/env.py @@ -101,21 +101,15 @@ class V1Env(ApiResource): def _create_data_source(self, ip): url = 'http://admin:admin@{}:{}/api/datasources'.format(ip, consts.GRAFANA_PORT) - influx_conf = utils.parse_ini_file(consts.CONF_FILE) - - try: - influx_url = influx_conf['dispatcher_influxdb']['target'] - except KeyError: - LOG.exception('influxdb url not set in yardstick.conf') - raise + influx_conf = utils.parse_ini_file(consts.CONF_FILE).get('dispatcher_influxdb', {}) data = { "name": "yardstick", "type": "influxdb", "access": "proxy", - "url": influx_url, - "password": "root", - "user": "root", + "url": influx_conf.get('target', ''), + "password": influx_conf.get('password', ''), + "user": influx_conf.get('username', ''), "database": "yardstick", "basicAuth": True, "basicAuthUser": "admin", diff --git a/api/resources/v2/containers.py b/api/resources/v2/containers.py index 66dc94120..ee1903901 100644 --- a/api/resources/v2/containers.py +++ b/api/resources/v2/containers.py @@ -272,21 +272,15 @@ class V2Containers(ApiResource): def _create_data_source(self, ip): url = 'http://admin:admin@{}:{}/api/datasources'.format(ip, 3000) - - influx_conf = utils.parse_ini_file(consts.CONF_FILE) - try: - influx_url = influx_conf['dispatcher_influxdb']['target'] - except KeyError: - LOG.exception('influxdb url not set in yardstick.conf') - raise + influx_conf = utils.parse_ini_file(consts.CONF_FILE).get('dispatcher_influxdb', {}) data = { "name": "yardstick", "type": "influxdb", "access": "proxy", - "url": influx_url, - "password": "root", - "user": "root", + "url": influx_conf.get('target', ''), + "password": influx_conf.get('password', ''), + "user": influx_conf.get('username', ''), "database": "yardstick", "basicAuth": True, "basicAuthUser": "admin", diff --git a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia_4port.yaml b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia_4port.yaml new file mode 100644 index 000000000..89842bee9 --- /dev/null +++ b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia_4port.yaml @@ -0,0 +1,50 @@ +# Copyright (c) 2016-2017 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# vfw_ipv4_profile_1flows.ixncfg + +--- +schema: yardstick:task:0.1 +scenarios: +- type: NSPerf + traffic_profile: "../../traffic_profiles/ixia_ipv4_latency.yaml" + topology: vfw_vnf_topology_ixia_4port.yaml + nodes: + tg__0: trafficgen_1.yardstick + vnf__0: vnf.yardstick + options: + framesize: + private: {64B: 100} + public: {64B: 100} + flow: + src_ip: [{'tg__0': 'xe0'}, {'tg__0': 'xe2'}] + dst_ip: [{'tg__0': 'xe1'}, {'tg__0': 'xe3'}] + count: 1 + traffic_type: 4 + rfc2544: + allowed_drop_rate: 0.0001 - 0.0001 + vnf__0: + rules: acl_1rule.yaml + vnf_config: {lb_config: 'SW', lb_count: 2, worker_config: '1C/1T', worker_threads: 1} + nfvi_enable: True + runner: + type: Iteration + iterations: 10 + interval: 35 + ixia_profile: ../../traffic_profiles/vfw/vfw_ipv4_profile_muttiport.ixncfg +context: + type: Node + name: yardstick + nfvi_type: baremetal + file: /etc/yardstick/nodes/pod_ixia_4port.yaml +#/etc/yardstick/nodes/pod_ixia.yaml diff --git a/samples/vnf_samples/nsut/vfw/vfw_vnf_topology_ixia_4port.yaml b/samples/vnf_samples/nsut/vfw/vfw_vnf_topology_ixia_4port.yaml new file mode 100644 index 000000000..04b3de9f0 --- /dev/null +++ b/samples/vnf_samples/nsut/vfw/vfw_vnf_topology_ixia_4port.yaml @@ -0,0 +1,72 @@ +# Copyright (c) 2016-2017 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +nsd:nsd-catalog: + nsd: + - id: vFW + name: vFW + short-name: vFW + description: scenario with vFW VNF + constituent-vnfd: + - member-vnf-index: '1' + vnfd-id-ref: tg__0 + VNF model: ../../vnf_descriptors/ixia_rfc2544_tpl.yaml + - member-vnf-index: '2' + vnfd-id-ref: vnf__0 + VNF model: ../../vnf_descriptors/vfw_vnf.yaml + + vld: + - id: uplink_0 + name: tg__0 to vnf__0 link 1 + type: ELAN + vnfd-connection-point-ref: + - member-vnf-index-ref: '1' + vnfd-connection-point-ref: xe0 + vnfd-id-ref: tg__0 + - member-vnf-index-ref: '2' + vnfd-connection-point-ref: xe0 + vnfd-id-ref: vnf__0 + + - id: downlink_0 + name: vnf__0 to tg__0 link 2 + type: ELAN + vnfd-connection-point-ref: + - member-vnf-index-ref: '2' + vnfd-connection-point-ref: xe1 + vnfd-id-ref: vnf__0 + - member-vnf-index-ref: '1' + vnfd-connection-point-ref: xe1 + vnfd-id-ref: tg__0 + + - id: uplink_1 + name: tg__0 to vnf__0 link 3 + type: ELAN + vnfd-connection-point-ref: + - member-vnf-index-ref: '1' + vnfd-connection-point-ref: xe2 + vnfd-id-ref: tg__0 + - member-vnf-index-ref: '2' + vnfd-connection-point-ref: xe2 + vnfd-id-ref: vnf__0 + + - id: downlink_1 + name: vnf__0 to tg__0 link 4 + type: ELAN + vnfd-connection-point-ref: + - member-vnf-index-ref: '2' + vnfd-connection-point-ref: xe3 + vnfd-id-ref: vnf__0 + - member-vnf-index-ref: '1' + vnfd-connection-point-ref: xe3 + vnfd-id-ref: tg__0 diff --git a/samples/vnf_samples/nsut/vpe/vpe_vnf_topology-3node.yaml b/samples/vnf_samples/nsut/vpe/vpe_vnf_topology-3node.yaml index 8a1d335de..7bd4bff84 100644 --- a/samples/vnf_samples/nsut/vpe/vpe_vnf_topology-3node.yaml +++ b/samples/vnf_samples/nsut/vpe/vpe_vnf_topology-3node.yaml @@ -30,7 +30,7 @@ nsd:nsd-catalog: VNF model: ../../vnf_descriptors/udp_replay_vnf.yaml #tg_vpe_upstream.yaml #VPE VNF vld: - - id: uplink + - id: uplink_0 name: tg__0 to vnf__0 link 1 type: ELAN vnfd-connection-point-ref: @@ -41,7 +41,7 @@ nsd:nsd-catalog: vnfd-connection-point-ref: xe0 vnfd-id-ref: vnf__0 #VNF - - id: downlink + - id: downlink_0 name: vnf__0 to tg__1 link 2 type: ELAN vnfd-connection-point-ref: diff --git a/samples/vnf_samples/nsut/vpe/vpe_vnf_topology.yaml b/samples/vnf_samples/nsut/vpe/vpe_vnf_topology.yaml index 5ad45028a..4ade967f7 100644 --- a/samples/vnf_samples/nsut/vpe/vpe_vnf_topology.yaml +++ b/samples/vnf_samples/nsut/vpe/vpe_vnf_topology.yaml @@ -27,7 +27,7 @@ nsd:nsd-catalog: VNF model: ../../vnf_descriptors/vpe_vnf.yaml #tg_l3fwd.yaml #tg_trex_tpl.yaml #TREX vld: - - id: uplink + - id: uplink_0 name: tg__0 to vnf__0 link 1 type: ELAN vnfd-connection-point-ref: @@ -38,7 +38,7 @@ nsd:nsd-catalog: vnfd-connection-point-ref: xe0 vnfd-id-ref: vnf__0 #VNF - - id: downlink + - id: downlink_0 name: vnf__0 to tg__0 link 2 type: ELAN vnfd-connection-point-ref: diff --git a/samples/vnf_samples/nsut/vpe/vpe_vnf_topology_ixia.yaml b/samples/vnf_samples/nsut/vpe/vpe_vnf_topology_ixia.yaml index 315a30845..d7e11a696 100644 --- a/samples/vnf_samples/nsut/vpe/vpe_vnf_topology_ixia.yaml +++ b/samples/vnf_samples/nsut/vpe/vpe_vnf_topology_ixia.yaml @@ -27,7 +27,7 @@ nsd:nsd-catalog: VNF model: ../../vnf_descriptors/vpe_vnf.yaml vld: - - id: uplink + - id: uplink_0 name: tg__0 to vnf__0 link 1 type: ELAN vnfd-connection-point-ref: @@ -38,7 +38,7 @@ nsd:nsd-catalog: vnfd-connection-point-ref: xe0 vnfd-id-ref: vnf__0 #VNF - - id: downlink + - id: downlink_0 name: vnf__0 to tg__0 link 2 type: ELAN vnfd-connection-point-ref: diff --git a/samples/vnf_samples/nsut/vpe/vpe_vnf_topology_ixload.yaml 
b/samples/vnf_samples/nsut/vpe/vpe_vnf_topology_ixload.yaml index aa1dc0d4f..3d93f79e3 100644 --- a/samples/vnf_samples/nsut/vpe/vpe_vnf_topology_ixload.yaml +++ b/samples/vnf_samples/nsut/vpe/vpe_vnf_topology_ixload.yaml @@ -27,7 +27,7 @@ nsd:nsd-catalog: VNF model: ../../vnf_descriptors/vpe_vnf.yaml vld: - - id: uplink + - id: uplink_0 name: tg__0 to vnf__0 link 1 type: ELAN vnfd-connection-point-ref: @@ -38,7 +38,7 @@ nsd:nsd-catalog: vnfd-connection-point-ref: xe0 vnfd-id-ref: vnf__0 #VNF - - id: downlink + - id: downlink_0 name: vnf__0 to tg__0 link 2 type: ELAN vnfd-connection-point-ref: diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml index f42db932c..8bac32404 100644 --- a/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml +++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml @@ -131,15 +131,15 @@ uplink_1: outer_l3v4: proto: "tcp" - srcip4: "{{get(flow, 'flow.src_ip1', '192.168.0.0-192.168.255.255') }}" - dstip4: "{{get(flow, 'flow.dst_ip1', '192.16.0.0-192.16.0.31') }}" + srcip4: "{{get(flow, 'flow.src_ip_1', '192.168.0.0-192.168.255.255') }}" + dstip4: "{{get(flow, 'flow.dst_ip_1', '192.16.0.0-192.16.0.31') }}" count: "{{get(flow, 'flow.count', '1') }}" ttl: 32 dscp: 32 outer_l4: - srcport: "{{get(flow, 'flow.src_port1', '0') }}" - dstport: "{{get(flow, 'flow.dst_port1', '0') }}" + srcport: "{{get(flow, 'flow.src_port_1', '0') }}" + dstport: "{{get(flow, 'flow.dst_port_1', '0') }}" count: "{{get(flow, 'flow.count', '1') }}" downlink_1: ipv4: @@ -155,13 +155,13 @@ downlink_1: outer_l3v4: proto: "tcp" - srcip4: "{{get(flow, 'flow.dst_ip1', '192.16.0.0-192.16.0.31') }}" - dstip4: "{{get(flow, 'flow.src_ip1', '192.168.0.0-192.168.255.255') }}" + srcip4: "{{get(flow, 'flow.dst_ip_1', '192.16.0.0-192.16.0.31') }}" + dstip4: "{{get(flow, 'flow.src_ip_1', '192.168.0.0-192.168.255.255') }}" count: "{{get(flow, 'flow.count', '1') }}" ttl: 32 dscp: 32 outer_l4: - srcport: "{{get(flow, 'flow.dst_port1', '0') }}" - dstport: "{{get(flow, 'flow.src_port1', '0') }}" + srcport: "{{get(flow, 'flow.dst_port_1', '0') }}" + dstport: "{{get(flow, 'flow.src_port_1', '0') }}" count: "{{get(flow, 'flow.count', '1') }}" diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml index af74df2c9..a475173f2 100644 --- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml +++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml @@ -122,8 +122,8 @@ downlink_1: outer_l3v4: proto: "udp" - srcip4: "{{get(flow, 'flow.dst_ip1', '1.1.1.1-1.15.255.255') }}" - dstip4: "{{get(flow, 'flow.src_ip1', '90.90.1.1-90.105.255.255') }}" + srcip4: "{{get(flow, 'flow.dst_ip_1', '1.1.1.1-1.15.255.255') }}" + dstip4: "{{get(flow, 'flow.src_ip_1', '90.90.1.1-90.105.255.255') }}" count: "{{get(flow, 'flow.count', '1') }}" ttl: 32 dscp: 0 diff --git a/samples/vnf_samples/traffic_profiles/vfw/vfw_ipv4_profile_muttiport.ixncfg b/samples/vnf_samples/traffic_profiles/vfw/vfw_ipv4_profile_muttiport.ixncfg Binary files differnew file mode 100644 index 000000000..76accc1ed --- /dev/null +++ b/samples/vnf_samples/traffic_profiles/vfw/vfw_ipv4_profile_muttiport.ixncfg diff --git a/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml b/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml index 69442b7c8..9b2a152f3 100644 --- a/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml +++ 
b/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml @@ -41,34 +41,10 @@ vnfd:vnfd-catalog: vdu: - id: trexgen-baremetal name: trexgen-baremetal - description: IXIAstateless traffic verifier - external-interface: - - name: xe0 - virtual-interface: - type: PCI-PASSTHROUGH - # Substitution variables MUST be quoted. Otherwise Python can misinterpet them. - vpci: '{{ interfaces.xe0.vpci }}' # Value filled by vnfdgen - local_iface_name: '{{ interfaces.xe0.local_iface_name }}' # Value filled by vnfdgen - local_ip: '{{ interfaces.xe0.local_ip }}' # Value filled by vnfdgen - driver: '{{ interfaces.xe0.driver}}' # Value filled by vnfdgen - dst_ip: '{{ interfaces.xe0.dst_ip }}' # Value filled by vnfdgen - local_mac: '{{ interfaces.xe0.local_mac }}' # Value filled by vnfdgen - dst_mac: '{{ interfaces.xe0.dst_mac }}' # Value filled by vnfdgen - bandwidth: 10 Gbps - vnfd-connection-point-ref: xe0 - - name: xe1 - virtual-interface: - type: PCI-PASSTHROUGH - vpci: '{{ interfaces.xe1.vpci }}' # Value filled by vnfdgen - local_iface_name: '{{ interfaces.xe1.local_iface_name }}' # Value filled by vnfdgen - local_ip: '{{ interfaces.xe1.local_ip }}' # Value filled by vnfdgen - driver: '{{ interfaces.xe1.driver}}' # Value filled by vnfdgen - dst_ip: '{{ interfaces.xe1.dst_ip }}' # Value filled by vnfdgen - local_mac: '{{ interfaces.xe1.local_mac }}' # Value filled by vnfdgen - dst_mac: '{{ interfaces.xe1.dst_mac }}' # Value filled by vnfdgen - bandwidth: 10 Gbps - vnfd-connection-point-ref: xe1 - + description: IXIAstateless traffic verifier + vm-flavor: + vcpu-count: '4' + memory-mb: '4096' benchmark: kpi: - rx_throughput_fps diff --git a/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml b/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml index b430d35bd..ad4953fce 100644 --- a/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml +++ b/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml @@ -16,10 +16,11 @@ vnfd:vnfd-catalog: vnfd: - id: IxLoadTrafficGen - name: IxLoadTrafficGenVnfSshIntel + name: IxLoadTrafficGen short-name: IxLoadTrafficGenVnf description: IxLoad client/server connection details mgmt-interface: + vdu-id: ixloadgen-baremetal user: '{{user}}' # Value filled by vnfdgen password: '{{password}}' # Value filled by vnfdgen ip: '{{ip}}' # Value filled by vnfdgen @@ -32,35 +33,16 @@ vnfd:vnfd-catalog: py_bin_path: '{{tg_config.py_bin_path}}' dut_result_dir: '{{tg_config.dut_result_dir}}' version: '{{tg_config.version}}' + connection-point: + - name: xe0 + type: VPORT + - name: xe1 + type: VPORT + vdu: - id: abclient-baremetal name: abclient-baremetal description: AB client interface details - external-interface: - - name: xe0 - virtual-interface: - type: PCI-PASSTHROUGH - vpci: '{{ interfaces.xe0.vpci }}' # Value filled by vnfdgen - local_iface_name: '{{ interfaces.xe0.local_iface_name }}' # Value filled by vnfdgen - local_ip: '{{ interfaces.xe0.local_ip }}' # Value filled by vnfdgen - dst_ip: '{{ interfaces.xe0.dst_ip }}' # Value filled by vnfdgen - local_mac: '{{ interfaces.xe0.local_mac }}' # Value filled by vnfdgen - dst_mac: '{{ interfaces.xe0.dst_mac }}' # Value filled by vnfdgen - netmask: '{{ interfaces.xe0.netmask }}' # Value filled by vnfdgen - bandwidth: 10 Gbps - vnfd-connection-point-ref: xe0 - - name: xe1 - virtual-interface: - type: PCI-PASSTHROUGH - vpci: '{{ interfaces.xe1.vpci }}' # Value filled by vnfdgen - local_iface_name: '{{ interfaces.xe1.local_iface_name }}' # Value filled by vnfdgen - local_ip: '{{ interfaces.xe1.local_ip }}' # Value filled by vnfdgen - dst_ip: '{{ 
interfaces.xe1.dst_ip }}' # Value filled by vnfdgen - local_mac: '{{ interfaces.xe1.local_mac }}' # Value filled by vnfdgen - dst_mac: '{{ interfaces.xe1.dst_mac }}' # Value filled by vnfdgen - netmask: '{{ interfaces.xe1.netmask }}' # Value filled by vnfdgen - bandwidth: 10 Gbps - vnfd-connection-point-ref: xe1 benchmark: kpi: - complete_requests @@ -77,4 +59,4 @@ vnfd:vnfd-catalog: - requests_served_95% (ms) - requests_served_98% (ms) - requests_served_99% (ms) - - requests_served_100% (ms)
\ No newline at end of file + - requests_served_100% (ms) @@ -29,6 +29,8 @@ setup( 'benchmark/scenarios/networking/*.txt', 'benchmark/scenarios/parser/*.sh', 'benchmark/scenarios/storage/*.bash', + 'network_services/nfvi/collectd.conf', + 'network_services/nfvi/collectd.sh', 'resources/files/*', 'resources/scripts/install/*.bash', 'resources/scripts/remove/*.bash' diff --git a/tests/ci/prepare_env.sh b/tests/ci/prepare_env.sh index c3ee4c76b..44e2694f7 100755 --- a/tests/ci/prepare_env.sh +++ b/tests/ci/prepare_env.sh @@ -85,36 +85,32 @@ ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" if [ "$INSTALLER_TYPE" == "fuel" ]; then #ip_fuel="10.20.0.2" verify_connectivity $INSTALLER_IP - echo "Fetching id_rsa file from jump_server $INSTALLER_IP..." - sshpass -p r00tme scp 2>/dev/null $ssh_options \ - root@${INSTALLER_IP}:~/.ssh/id_rsa /root/.ssh/id_rsa &> /dev/null - sshpass -p r00tme ssh 2>/dev/null $ssh_options \ - root@${INSTALLER_IP} fuel node>fuel_node + ssh -l ubuntu ${INSTALLER_IP} -i ${SSH_KEY} ${ssh_options} \ + "sudo salt -C 'ctl* or cmp*' grains.get fqdn_ip4 --out yaml">node_info - # update fuel node id and ip info according to the CI env - controller_IDs=($(cat fuel_node|grep controller|awk '{print $1}')) - compute_IDs=($(cat fuel_node|grep compute|awk '{print $1}')) - controller_ips=($(cat fuel_node|grep controller|awk '{print $10}')) - compute_ips=($(cat fuel_node|grep compute|awk '{print $10}')) + # update node ip info according to the CI env + controller_ips=($(cat node_info|awk '/ctl/{getline; print $2}')) + compute_ips=($(cat node_info|awk '/cmp/{getline; print $2}')) pod_yaml="./etc/yardstick/nodes/fuel_baremetal/pod.yaml" node_line_num=($(grep -n node[1-5] $pod_yaml | awk -F: '{print $1}')) + node_ID=0; if [[ ${controller_ips[0]} ]]; then - sed -i "${node_line_num[0]}s/node1/node${controller_IDs[0]}/;s/ip1/${controller_ips[0]}/" $pod_yaml; + sed -i "${node_line_num[0]}s/node1/node$((++node_ID))/;s/ip1/${controller_ips[0]}/" $pod_yaml; fi if [[ ${controller_ips[1]} ]]; then - sed -i "${node_line_num[1]}s/node2/node${controller_IDs[1]}/;s/ip2/${controller_ips[1]}/" $pod_yaml; + sed -i "${node_line_num[1]}s/node2/node$((++node_ID))/;s/ip2/${controller_ips[1]}/" $pod_yaml; fi if [[ ${controller_ips[2]} ]]; then - sed -i "${node_line_num[2]}s/node3/node${controller_IDs[2]}/;s/ip3/${controller_ips[2]}/" $pod_yaml; + sed -i "${node_line_num[2]}s/node3/node$((++node_ID))/;s/ip3/${controller_ips[2]}/" $pod_yaml; fi if [[ ${compute_ips[0]} ]]; then - sed -i "${node_line_num[3]}s/node4/node${compute_IDs[0]}/;s/ip4/${compute_ips[0]}/" $pod_yaml; + sed -i "${node_line_num[3]}s/node4/node$((++node_ID))/;s/ip4/${compute_ips[0]}/" $pod_yaml; fi if [[ ${compute_ips[1]} ]]; then - sed -i "${node_line_num[4]}s/node5/node${compute_IDs[1]}/;s/ip5/${compute_ips[1]}/" $pod_yaml; + sed -i "${node_line_num[4]}s/node5/node$((++node_ID))/;s/ip5/${compute_ips[1]}/" $pod_yaml; fi fi diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml index a37f83b83..3e630caf2 100644 --- a/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml @@ -19,7 +19,7 @@ description: > {% set file = file or '/etc/yardstick/pod.yaml' %} {% set jump_host = jump_host or 'node0' %} {% set attack_host = attack_host or 'node1' %} -{% set monitor_time = monitor_time or 180 %} +{% set monitor_time = monitor_time or 30 %} scenarios: - diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc055.yaml 
b/tests/opnfv/test_cases/opnfv_yardstick_tc055.yaml index 7e33741b1..10a7a7108 100644 --- a/tests/opnfv/test_cases/opnfv_yardstick_tc055.yaml +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc055.yaml @@ -24,7 +24,6 @@ description: > scenarios: - type: ComputeCapacity - options: nodes: host: {{host}} diff --git a/tests/opnfv/test_suites/opnfv_os-odl-dvr-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-dvr-noha_daily.yaml new file mode 100644 index 000000000..1f66f9b3e --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_os-odl-dvr-noha_daily.yaml @@ -0,0 +1,38 @@ +############################################################################## +# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +# os-odl-dvr-noha daily task suite + +schema: "yardstick:suite:0.1" + +name: "os-odl-dvr-noha" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc010.yaml +- + file_name: opnfv_yardstick_tc011.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc014.yaml +- + file_name: opnfv_yardstick_tc037.yaml +- + file_name: opnfv_yardstick_tc069.yaml +- + file_name: opnfv_yardstick_tc070.yaml +- + file_name: opnfv_yardstick_tc071.yaml +- + file_name: opnfv_yardstick_tc072.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-odl-fdio-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-fdio-ha_daily.yaml new file mode 100644 index 000000000..7716b6ccd --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_os-odl-fdio-ha_daily.yaml @@ -0,0 +1,38 @@ +############################################################################## +# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +# os-odl-fdio-ha daily task suite + +schema: "yardstick:suite:0.1" + +name: "os-odl-fdio-ha" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc010.yaml +- + file_name: opnfv_yardstick_tc011.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc014.yaml +- + file_name: opnfv_yardstick_tc037.yaml +- + file_name: opnfv_yardstick_tc069.yaml +- + file_name: opnfv_yardstick_tc070.yaml +- + file_name: opnfv_yardstick_tc071.yaml +- + file_name: opnfv_yardstick_tc072.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-odl-sfc-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-sfc-noha_daily.yaml new file mode 100644 index 000000000..ba870417d --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_os-odl-sfc-noha_daily.yaml @@ -0,0 +1,62 @@ +############################################################################## +# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +# os-odl-sfc-noha daily task suite + +schema: "yardstick:suite:0.1" + +name: "os-odl-sfc-noha" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc010.yaml +- + file_name: opnfv_yardstick_tc011.yaml +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc014.yaml +- + file_name: opnfv_yardstick_tc037.yaml +- + file_name: opnfv_yardstick_tc055.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC055"}' +- + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- + file_name: opnfv_yardstick_tc069.yaml +- + file_name: opnfv_yardstick_tc070.yaml +- + file_name: opnfv_yardstick_tc071.yaml +- + file_name: opnfv_yardstick_tc072.yaml +- + file_name: opnfv_yardstick_tc075.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node1.LF"}' diff --git a/tests/unit/benchmark/contexts/test_kubernetes.py b/tests/unit/benchmark/contexts/test_kubernetes.py index 4976a9fe0..3a926f85c 100644 --- a/tests/unit/benchmark/contexts/test_kubernetes.py +++ b/tests/unit/benchmark/contexts/test_kubernetes.py @@ -47,13 +47,15 @@ class KubernetesTestCase(unittest.TestCase): # clear kubernetes contexts from global list so we don't break other tests Context.list = [] + @mock.patch('{}.KubernetesContext._delete_services'.format(prefix)) @mock.patch('{}.KubernetesContext._delete_ssh_key'.format(prefix)) @mock.patch('{}.KubernetesContext._delete_rcs'.format(prefix)) @mock.patch('{}.KubernetesContext._delete_pods'.format(prefix)) def test_undeploy(self, mock_delete_pods, mock_delete_rcs, - mock_delete_ssh): + mock_delete_ssh, + mock_delete_services): k8s_context = KubernetesContext() k8s_context.init(context_cfg) @@ -61,7 +63,9 @@ class KubernetesTestCase(unittest.TestCase): self.assertTrue(mock_delete_ssh.called) self.assertTrue(mock_delete_rcs.called) self.assertTrue(mock_delete_pods.called) + self.assertTrue(mock_delete_services.called) + @mock.patch('{}.KubernetesContext._create_services'.format(prefix)) @mock.patch('{}.KubernetesContext._wait_until_running'.format(prefix)) @mock.patch('{}.KubernetesTemplate.get_rc_pods'.format(prefix)) @mock.patch('{}.KubernetesContext._create_rcs'.format(prefix)) @@ -70,7 +74,8 @@ class KubernetesTestCase(unittest.TestCase): mock_set_ssh_key, mock_create_rcs, mock_get_rc_pods, - mock_wait_until_running): + mock_wait_until_running, + mock_create_services): k8s_context = KubernetesContext() k8s_context.init(context_cfg) @@ -78,6 +83,7 @@ class KubernetesTestCase(unittest.TestCase): k8s_context.deploy() self.assertTrue(mock_set_ssh_key.called) self.assertTrue(mock_create_rcs.called) + self.assertTrue(mock_create_services.called) self.assertTrue(mock_get_rc_pods.called) self.assertTrue(mock_wait_until_running.called) @@ -106,14 +112,39 @@ class 
KubernetesTestCase(unittest.TestCase): mock_read_pod_status.return_value = 'Running' k8s_context._wait_until_running() - @mock.patch('{}.k8s_utils.get_pod_list'.format(prefix)) - def test_get_server(self, mock_get_pod_list): + @mock.patch('{}.k8s_utils.get_pod_by_name'.format(prefix)) + @mock.patch('{}.KubernetesContext._get_node_ip'.format(prefix)) + @mock.patch('{}.k8s_utils.get_service_by_name'.format(prefix)) + def test_get_server(self, + mock_get_service_by_name, + mock_get_node_ip, + mock_get_pod_by_name): + class Service(object): + def __init__(self): + self.name = 'yardstick' + self.node_port = 30000 + + class Services(object): + def __init__(self): + self.ports = [Service()] + + class Status(object): + def __init__(self): + self.pod_ip = '172.16.10.131' + + class Pod(object): + def __init__(self): + self.status = Status() + k8s_context = KubernetesContext() k8s_context.init(context_cfg) - mock_get_pod_list.return_value.items = [] + mock_get_service_by_name.return_value = Services() + mock_get_pod_by_name.return_value = Pod() + mock_get_node_ip.return_value = '172.16.10.131' + server = k8s_context._get_server('server') - self.assertIsNone(server) + self.assertIsNotNone(server) @mock.patch('{}.KubernetesContext._create_rc'.format(prefix)) def test_create_rcs(self, mock_create_rc): @@ -143,6 +174,28 @@ class KubernetesTestCase(unittest.TestCase): k8s_context._delete_rc({}) self.assertTrue(mock_delete_replication_controller.called) + @mock.patch('{}.k8s_utils.get_node_list'.format(prefix)) + def test_get_node_ip(self, mock_get_node_list): + + k8s_context = KubernetesContext() + k8s_context.init(context_cfg) + k8s_context._get_node_ip() + self.assertTrue(mock_get_node_list.called) + + @mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.create') + def test_create_services(self, mock_create): + k8s_context = KubernetesContext() + k8s_context.init(context_cfg) + k8s_context._create_services() + self.assertTrue(mock_create.called) + + @mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.delete') + def test_delete_services(self, mock_delete): + k8s_context = KubernetesContext() + k8s_context.init(context_cfg) + k8s_context._delete_services() + self.assertTrue(mock_delete.called) + def main(): unittest.main() diff --git a/tests/unit/network_services/libs/ixia_libs/test_IxNet.py b/tests/unit/network_services/libs/ixia_libs/test_IxNet.py index 0c82d74a8..3f374fb50 100644 --- a/tests/unit/network_services/libs/ixia_libs/test_IxNet.py +++ b/tests/unit/network_services/libs/ixia_libs/test_IxNet.py @@ -82,10 +82,8 @@ class TestIxNextgen(unittest.TestCase): config = { 'chassis': '1.1.1.1', - 'card1': '1', - 'card2': '2', - 'port1': '2', - 'port2': '2', + 'cards': ['1', '2'], + 'ports': ['2', '2'], } ixnet_gen = IxNextgen(ixnet) @@ -673,10 +671,8 @@ class TestIxNextgen(unittest.TestCase): 'machine': 'test1', 'port': 'test5', 'chassis': 'test4', - 'card1': '0000', - 'port1': '07', - 'card2': '0001', - 'port2': '08', + 'cards': ['0000', '0001'], + 'ports': ['07', '08'], 'output_dir': 'test2', 'version': 'test3', 'bidir': True, diff --git a/tests/unit/network_services/nfvi/test_resource.py b/tests/unit/network_services/nfvi/test_resource.py index 1c2c1f3e2..eba38c688 100644 --- a/tests/unit/network_services/nfvi/test_resource.py +++ b/tests/unit/network_services/nfvi/test_resource.py @@ -14,7 +14,6 @@ from __future__ import absolute_import import unittest -import multiprocessing import mock from yardstick.network_services.nfvi.resource import ResourceProfile @@ -86,17 +85,20 @@ class 
TestResourceProfile(unittest.TestCase): 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'}]}} def setUp(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ + with mock.patch("yardstick.ssh.AutoConnectSSH") as ssh: + self.ssh_mock = mock.Mock(autospec=ssh.SSH) + self.ssh_mock.execute = \ mock.Mock(return_value=(0, {}, "")) - ssh.from_node.return_value = ssh_mock + ssh.from_node.return_value = self.ssh_mock mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] - interfaces = \ + # interfaces = \ + # self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] + port_names = \ self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] self.resource_profile = \ - ResourceProfile(mgmt, interfaces, [1, 2, 3]) + ResourceProfile(mgmt, port_names, [1, 2, 3]) + self.resource_profile.connection = self.ssh_mock def test___init__(self): self.assertEqual(True, self.resource_profile.enable) @@ -118,133 +120,33 @@ class TestResourceProfile(unittest.TestCase): self.assertEqual(val, ('error', 'Invalid', '', '')) def test__start_collectd(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] - interfaces = \ - self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] - resource_profile = \ - ResourceProfile(mgmt, interfaces, [1, 2, 3]) - resource_profile._prepare_collectd_conf = mock.Mock() - self.assertIsNone( - resource_profile._start_collectd(ssh_mock, "/opt/nsb_bin")) - - def test__prepare_collectd_conf_BM(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] - interfaces = \ - self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] - resource_profile = \ - ResourceProfile(mgmt, interfaces, [1, 2, 3]) - resource_profile._provide_config_file = mock.Mock() - self.assertIsNone( - resource_profile._prepare_collectd_conf("/opt/nsb_bin")) - - def test__prepare_collectd_conf_managed_ovs_dpdk(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] - interfaces = \ - self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] - resource_profile = \ - ResourceProfile(mgmt, interfaces, [1, 2, 3]) - resource_profile._provide_config_file = mock.Mock() - self.assertIsNone( - resource_profile._prepare_collectd_conf("/opt/nsb_bin")) - - def test__prepare_collectd_conf_ovs_dpdk(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] - interfaces = \ - self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] - resource_profile = \ - ResourceProfile(mgmt, interfaces, [1, 2, 3]) - resource_profile._provide_config_file = mock.Mock() self.assertIsNone( - resource_profile._prepare_collectd_conf("/opt/nsb_bin")) + 
self.resource_profile._start_collectd(self.ssh_mock, "/opt/nsb_bin")) - def test__prepare_collectd_conf_managed_sriov(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] - interfaces = \ - self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] - resource_profile = \ - ResourceProfile(mgmt, interfaces, [1, 2, 3]) - resource_profile._provide_config_file = mock.Mock() + def test__prepare_collectd_conf(self): self.assertIsNone( - resource_profile._prepare_collectd_conf("/opt/nsb_bin")) + self.resource_profile._prepare_collectd_conf("/opt/nsb_bin")) - def test__prepare_collectd_conf_sriov(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] - interfaces = \ - self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] - resource_profile = \ - ResourceProfile(mgmt, interfaces, [1, 2, 3]) - resource_profile._provide_config_file = mock.Mock() - self.assertIsNone( - resource_profile._prepare_collectd_conf("/opt/nsb_bin")) @mock.patch("yardstick.network_services.nfvi.resource.open") - @mock.patch("yardstick.network_services.nfvi.resource.tempfile") @mock.patch("yardstick.network_services.nfvi.resource.os") - def test__provide_config_file(self, mock_open, mock_tempfile, mock_os): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] - interfaces = \ - self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] - resource_profile = \ - ResourceProfile(mgmt, interfaces, [1, 2, 3]) - resource_profile._prepare_collectd_conf = mock.Mock() - resource_profile.connection = ssh_mock - resource_profile.connection.put = \ - mock.Mock(return_value=(0, "", "")) - mock_tempfile.mkstemp = mock.Mock(return_value=["test", ""]) - self.assertIsNone( - resource_profile._provide_config_file("/opt/nsb_bin", - "collectd.cfg", {})) + def test__provide_config_file(self, mock_open, mock_os): + loadplugin = range(5) + port_names = range(5) + kwargs = { + "interval": '25', + "loadplugin": loadplugin, + "port_names": port_names, + } + self.resource_profile._provide_config_file("/opt/nsb_bin", "collectd.conf", kwargs) + self.ssh_mock.execute.assert_called_once() + @mock.patch("yardstick.network_services.nfvi.resource.open") def test_initiate_systemagent(self, mock_open): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] - interfaces = \ - self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface'] - resource_profile = \ - ResourceProfile(mgmt, interfaces, [1, 2, 3]) - resource_profile._start_collectd = mock.Mock() - self.assertIsNone( - resource_profile.initiate_systemagent("/opt/nsb_bin")) + self.resource_profile._start_collectd = mock.Mock() + self.assertIsNone( + self.resource_profile.initiate_systemagent("/opt/nsb_bin")) def 
test__parse_hugepages(self): reskey = ["cpu", "cpuFreq"] @@ -301,21 +203,21 @@ class TestResourceProfile(unittest.TestCase): self.assertDictEqual(res, expected_result) def test_parse_collectd_result_hugepage(self): - metric = {"nsb_stats/hugepages/free": "101"} + # amqp returns bytes + metric = {b"nsb_stats/hugepages/free": b"101"} self.resource_profile.parse_hugepages = \ mock.Mock(return_value={"free": "101"}) res = self.resource_profile.parse_collectd_result(metric, [0, 1, 2]) - expected_result = {'cpu': {}, 'dpdkstat': {}, 'hugepages': {'free': - '101'}, + expected_result = {'cpu': {}, 'dpdkstat': {}, 'hugepages': {'free': '101'}, 'memory': {}, 'ovs_stats': {}, 'timestamp': '', 'intel_pmu': {}, 'virt': {}} self.assertDictEqual(res, expected_result) def test_parse_collectd_result_dpdk_virt_ovs(self): - metric = {"nsb_stats/dpdkstat/tx": "101", - "nsb_stats/ovs_stats/tx": "101", - "nsb_stats/virt/virt/memory": "101"} + metric = {b"nsb_stats/dpdkstat/tx": b"101", + b"nsb_stats/ovs_stats/tx": b"101", + b"nsb_stats/virt/virt/memory": b"101"} self.resource_profile.parse_dpdkstat = \ mock.Mock(return_value={"tx": "101"}) self.resource_profile.parse_virt = \ @@ -347,7 +249,6 @@ class TestResourceProfile(unittest.TestCase): self.assertIsNotNone(res) def test_run_collectd_amqp(self): - _queue = multiprocessing.Queue() resource.AmqpConsumer = mock.Mock(autospec=collectd) self.assertIsNone(self.resource_profile.run_collectd_amqp()) diff --git a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py index 4b9f4172e..d0c4b6f42 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py @@ -723,8 +723,9 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase): result = dpdk_setup_helper._validate_cpu_cfg() self.assertEqual(result, expected) + @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time') @mock.patch('yardstick.ssh.SSH') - def test_setup_vnf_environment(self, _): + def test_setup_vnf_environment(self, _, mock_time): def execute(cmd, *args, **kwargs): if cmd.startswith('which '): return exec_failure @@ -782,6 +783,8 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase): dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) dpdk_setup_helper._validate_cpu_cfg = mock.Mock() + dpdk_setup_helper.bound_pci = [v['virtual-interface']["vpci"] for v in + vnfd_helper.interfaces] result = dpdk_setup_helper._setup_resources() self.assertIsInstance(result, ResourceProfile) self.assertEqual(dpdk_setup_helper.socket, 0) @@ -796,11 +799,14 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase): dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) dpdk_setup_helper._validate_cpu_cfg = mock.Mock() + dpdk_setup_helper.bound_pci = [v['virtual-interface']["vpci"] for v in + vnfd_helper.interfaces] result = dpdk_setup_helper._setup_resources() self.assertIsInstance(result, ResourceProfile) self.assertEqual(dpdk_setup_helper.socket, 1) - def test__detect_and_bind_drivers(self): + @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time') + def test__detect_and_bind_drivers(self, mock_time): vnfd_helper = VnfdHelper(deepcopy(self.VNFD_0)) ssh_helper = mock.Mock() # ssh_helper.execute = mock.Mock(return_value = (0, 'text', '')) diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py 
b/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py index 0c3520c44..f62a0fb3b 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py @@ -252,7 +252,8 @@ class TestIXIATrafficGen(unittest.TestCase): mock_traffic_profile = mock.Mock(autospec=TrafficProfile) mock_traffic_profile.get_traffic_definition.return_value = "64" mock_traffic_profile.params = self.TRAFFIC_PROFILE - mock_traffic_profile.ports = ["xe0", "xe1"] + # traffic_profile.ports is standardized on port_num + mock_traffic_profile.ports = [0, 1] mock_ssh_instance = mock.Mock(autospec=mock_ssh.SSH) mock_ssh_instance.execute.return_value = 0, "", "" @@ -346,9 +347,12 @@ class TestIXIATrafficGen(unittest.TestCase): 'task_path': '/path/to/task' } - with mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open', - create=True) as mock_open: - with mock.patch('yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.open', - create=True) as mock_ixia_open: - result = sut._traffic_runner(mock_traffic_profile) - self.assertIsNone(result) + @mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open', create=True) + @mock.patch('yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.open', + mock.mock_open(), create=True) + @mock.patch('yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.LOG.exception') + def _traffic_runner(*args): + result = sut._traffic_runner(mock_traffic_profile) + self.assertIsNone(result) + + _traffic_runner() diff --git a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py index 757109d11..3813aaa21 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py @@ -25,6 +25,7 @@ from multiprocessing import Process, Queue from tests.unit import STL_MOCKS from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper +from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper' @@ -55,14 +56,117 @@ get_file_abspath = MODULE_PATH.get_path class TestConfigCreate(unittest.TestCase): + VNFD_0 = { + 'short-name': 'VpeVnf', + 'vdu': [ + { + 'routing_table': [ + { + 'network': '152.16.100.20', + 'netmask': '255.255.255.0', + 'gateway': '152.16.100.20', + 'if': 'xe0' + }, + { + 'network': '152.16.40.20', + 'netmask': '255.255.255.0', + 'gateway': '152.16.40.20', + 'if': 'xe1' + }, + ], + 'description': 'VPE approximation using DPDK', + 'name': 'vpevnf-baremetal', + 'nd_route_tbl': [ + { + 'network': '0064:ff9b:0:0:0:0:9810:6414', + 'netmask': '112', + 'gateway': '0064:ff9b:0:0:0:0:9810:6414', + 'if': 'xe0' + }, + { + 'network': '0064:ff9b:0:0:0:0:9810:2814', + 'netmask': '112', + 'gateway': '0064:ff9b:0:0:0:0:9810:2814', + 'if': 'xe1' + }, + ], + 'id': 'vpevnf-baremetal', + 'external-interface': [ + { + 'virtual-interface': { + 'dst_mac': '00:00:00:00:00:03', + 'vpci': '0000:05:00.0', + 'local_ip': '152.16.100.19', + 'type': 'PCI-PASSTHROUGH', + 'netmask': '255.255.255.0', + 'dpdk_port_num': 0, + 'bandwidth': '10 Gbps', + 'dst_ip': '152.16.100.20', + 'local_mac': '00:00:00:00:00:01', + 'vld_id': 'uplink_0', + 'ifname': 'xe0', + }, + 'vnfd-connection-point-ref': 'xe0', + 'name': 'xe0' + }, + { + 'virtual-interface': { + 'dst_mac': '00:00:00:00:00:04', + 'vpci': '0000:05:00.1', + 'local_ip': '152.16.40.19', + 'type': 
'PCI-PASSTHROUGH', + 'netmask': '255.255.255.0', + 'dpdk_port_num': 1, + 'bandwidth': '10 Gbps', + 'dst_ip': '152.16.40.20', + 'local_mac': '00:00:00:00:00:02', + 'vld_id': 'downlink_0', + 'ifname': 'xe1', + }, + 'vnfd-connection-point-ref': 'xe1', + 'name': 'xe1' + }, + ], + }, + ], + 'description': 'Vpe approximation using DPDK', + 'mgmt-interface': { + 'vdu-id': 'vpevnf-baremetal', + 'host': '1.1.1.1', + 'password': 'r00t', + 'user': 'root', + 'ip': '1.1.1.1' + }, + 'benchmark': { + 'kpi': [ + 'packets_in', + 'packets_fwd', + 'packets_dropped', + ], + }, + 'connection-point': [ + { + 'type': 'VPORT', + 'name': 'xe0', + }, + { + 'type': 'VPORT', + 'name': 'xe1', + }, + ], + 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh' + } + def test___init__(self): - config_create = ConfigCreate([0], [1], 2) - self.assertEqual(config_create.uplink_ports, [0]) - self.assertEqual(config_create.downlink_ports, [1]) + vnfd_helper = VnfdHelper(self.VNFD_0) + config_create = ConfigCreate(vnfd_helper, 2) + self.assertEqual(config_create.uplink_ports, ['xe0']) + self.assertEqual(config_create.downlink_ports, ['xe1']) self.assertEqual(config_create.socket, 2) def test_vpe_initialize(self): - config_create = ConfigCreate([0], [1], 2) + vnfd_helper = VnfdHelper(self.VNFD_0) + config_create = ConfigCreate(vnfd_helper, 2) config = configparser.ConfigParser() config_create.vpe_initialize(config) self.assertEqual(config.get('EAL', 'log_level'), '0') @@ -72,14 +176,16 @@ class TestConfigCreate(unittest.TestCase): self.assertEqual(config.get('MEMPOOL1', 'pool_size'), '2M') def test_vpe_rxq(self): - config_create = ConfigCreate([0], [1, 2], 3) + vnfd_helper = VnfdHelper(self.VNFD_0) + config_create = ConfigCreate(vnfd_helper, 2) config = configparser.ConfigParser() + config_create.downlink_ports = ['xe0'] config_create.vpe_rxq(config) - self.assertEqual(config.get('RXQ1.0', 'mempool'), 'MEMPOOL1') - self.assertEqual(config.get('RXQ2.0', 'mempool'), 'MEMPOOL1') + self.assertEqual(config.get('RXQ0.0', 'mempool'), 'MEMPOOL1') def test_get_sink_swq(self): - config_create = ConfigCreate([0], [1], 2) + vnfd_helper = VnfdHelper(self.VNFD_0) + config_create = ConfigCreate(vnfd_helper, 2) config = configparser.ConfigParser() config.add_section('PIPELINE0') config.set('PIPELINE0', 'key1', 'value1') @@ -96,15 +202,26 @@ class TestConfigCreate(unittest.TestCase): self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key5', 5), 'SWQ0 SINK1') def test_generate_vpe_script(self): - vpe_config_vnf = ConfigCreate([0], [0], 0) + vnfd_helper = VnfdHelper(self.VNFD_0) + vpe_config_vnf = ConfigCreate(vnfd_helper, 2) intf = [ { + "name": 'xe1', + "virtual-interface": { + "dst_ip": "1.1.1.1", + "dst_mac": "00:00:00:00:00:00:02", + }, + }, + { + "name": 'xe2', "virtual-interface": { "dst_ip": "1.1.1.1", "dst_mac": "00:00:00:00:00:00:02", }, }, ] + vpe_config_vnf.downlink_ports = ['xe1'] + vpe_config_vnf.uplink_ports = ['xe2'] result = vpe_config_vnf.generate_vpe_script(intf) self.assertIsInstance(result, str) self.assertNotEqual(result, '') @@ -132,7 +249,10 @@ class TestConfigCreate(unittest.TestCase): }, ] - config_create = ConfigCreate(uplink_ports, downlink_ports, 23) + vnfd_helper = VnfdHelper(self.VNFD_0) + config_create = ConfigCreate(vnfd_helper, 23) + config_create.downlink_ports = ['xe1'] + config_create.uplink_ports = ['xe1'] curr_path = os.path.dirname(os.path.abspath(__file__)) vpe_cfg = "samples/vnf_samples/nsut/vpe/vpe_config" vnf_cfg = os.path.join(curr_path, "../../../../..", vpe_cfg) diff --git 
a/tests/unit/orchestrator/test_kubernetes.py b/tests/unit/orchestrator/test_kubernetes.py index 51718ab86..1a3291c89 100644 --- a/tests/unit/orchestrator/test_kubernetes.py +++ b/tests/unit/orchestrator/test_kubernetes.py @@ -62,7 +62,10 @@ service ssh restart;while true ; do sleep 10000; done" }, "name": "k8s-86096c30-key" } - ] + ], + "nodeSelector": { + "kubernetes.io/hostname": "node-01" + } } } } @@ -71,7 +74,8 @@ service ssh restart;while true ; do sleep 10000; done" 'command': '/bin/bash', 'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \ service ssh restart;while true ; do sleep 10000; done'], - 'ssh_key': 'k8s-86096c30-key' + 'ssh_key': 'k8s-86096c30-key', + 'nodeSelector': { 'kubernetes.io/hostname': 'node-01'} } name = 'host-k8s-86096c30' output_r = KubernetesObject(name, **input_s).get_template() diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py index a39f63137..2334e5076 100644 --- a/yardstick/benchmark/contexts/kubernetes.py +++ b/yardstick/benchmark/contexts/kubernetes.py @@ -54,6 +54,7 @@ class KubernetesContext(Context): LOG.info('Launch containers') self._create_rcs() + self._create_services() time.sleep(1) self.template.get_rc_pods() @@ -63,6 +64,7 @@ class KubernetesContext(Context): self._delete_ssh_key() self._delete_rcs() self._delete_pods() + self._delete_services() super(KubernetesContext, self).undeploy() @@ -80,6 +82,14 @@ class KubernetesContext(Context): return False return True + def _create_services(self): + for obj in self.template.service_objs: + obj.create() + + def _delete_services(self): + for obj in self.template.service_objs: + obj.delete() + def _create_rcs(self): for obj in self.template.k8s_objs: self._create_rc(obj.get_template()) @@ -126,15 +136,22 @@ class KubernetesContext(Context): utils.remove_file(self.public_key_path) def _get_server(self, name): - resp = k8s_utils.get_pod_list() - hosts = ({'name': n.metadata.name, - 'ip': n.status.pod_ip, - 'user': 'root', - 'key_filename': self.key_path, - 'private_ip': n.status.pod_ip} - for n in resp.items if n.metadata.name.startswith(name)) - - return next(hosts, None) + service_name = '{}-service'.format(name) + service = k8s_utils.get_service_by_name(service_name).ports[0] + + host = { + 'name': service.name, + 'ip': self._get_node_ip(), + 'private_ip': k8s_utils.get_pod_by_name(name).status.pod_ip, + 'ssh_port': service.node_port, + 'user': 'root', + 'key_filename': self.key_path, + } + + return host + + def _get_node_ip(self): + return k8s_utils.get_node_list().items[0].status.addresses[0].address def _get_network(self, attr_name): return None diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py index 0b6e3230b..c175a950b 100644 --- a/yardstick/benchmark/core/task.py +++ b/yardstick/benchmark/core/task.py @@ -125,9 +125,10 @@ class Task(object): # pragma: no cover except KeyboardInterrupt: raise except Exception: - LOG.exception("Running test case %s failed!", case_name) + LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True) testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []} else: + LOG.info('Testcase: "%s" SUCCESS!!!', case_name) testcases[case_name] = {'criteria': 'PASS', 'tc_data': data} if args.keep_deploy: @@ -272,7 +273,9 @@ class Task(object): # pragma: no cover runner = self.run_one_scenario(scenario, output_file) status = runner_join(runner) if status != 0: - LOG.error('Scenario: %s ERROR', scenario.get('type')) + LOG.error('Scenario NO.%s: "%s" ERROR!', + scenarios.index(scenario) + 1, 
+ scenario.get('type')) raise RuntimeError self.outputs.update(runner.get_output()) result.extend(runner.get_result()) @@ -333,7 +336,7 @@ class Task(object): # pragma: no cover context_cfg['target'] = {"ipaddr": target} else: context_cfg['target'] = Context.get_server(target) - if self._is_same_heat_context(cfg["host"], target): + if self._is_same_context(cfg["host"], target): context_cfg['target']["ipaddr"] = context_cfg['target']["private_ip"] else: context_cfg['target']["ipaddr"] = context_cfg['target']["ip"] @@ -358,8 +361,8 @@ class Task(object): # pragma: no cover context_cfg['target'] = {} else: context_cfg['target'] = Context.get_server(target) - if self._is_same_heat_context(scenario_cfg["host"], - target): + if self._is_same_context(scenario_cfg["host"], + target): ip_list.append(context_cfg["target"]["private_ip"]) else: ip_list.append(context_cfg["target"]["ip"]) @@ -377,7 +380,7 @@ class Task(object): # pragma: no cover return runner - def _is_same_heat_context(self, host_attr, target_attr): + def _is_same_context(self, host_attr, target_attr): """check if two servers are in the same heat context host_attr: either a name for a server created by yardstick or a dict with attribute name mapping when using external heat templates @@ -385,7 +388,7 @@ class Task(object): # pragma: no cover with attribute name mapping when using external heat templates """ for context in self.contexts: - if context.__context_type__ != "Heat": + if context.__context_type__ not in {"Heat", "Kubernetes"}: continue host = context._get_server(host_attr) diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py index 50d44c1ca..979e3ab14 100644 --- a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py +++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py @@ -40,6 +40,21 @@ class BaremetalAttacker(BaseAttacker): self.connection = ssh.SSH.from_node(host, defaults={"user": "root"}) self.connection.wait(timeout=600) LOG.debug("ssh host success!") + + jump_host_name = self._config.get("jump_host", None) + self.jump_connection = None + if jump_host_name is not None: + jump_host = self._context.get(jump_host_name, None) + + LOG.debug("jump_host ip:%s user:%s", jump_host['ip'], jump_host['user']) + self.jump_connection = ssh.SSH.from_node( + jump_host, + # why do we allow pwd for password? 
+ defaults={"user": "root", "password": jump_host.get("pwd")} + ) + self.jump_connection.wait(timeout=600) + LOG.debug("ssh jump host success!") + self.host_ip = host['ip'] self.ipmi_ip = host.get("ipmi_ip", None) @@ -49,6 +64,7 @@ class BaremetalAttacker(BaseAttacker): self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down') self.check_script = self.get_script_fullpath( self.fault_cfg['check_script']) + self.inject_script = self.get_script_fullpath(self.fault_cfg['inject_script']) self.recovery_script = self.get_script_fullpath( self.fault_cfg['recovery_script']) @@ -70,39 +86,27 @@ class BaremetalAttacker(BaseAttacker): return True def inject_fault(self): - exit_status, stdout, stderr = self.connection.execute( - "sudo shutdown -h now") - LOG.debug("inject fault ret: %s out:%s err:%s", - exit_status, stdout, stderr) - if not exit_status: - LOG.info("inject fault success") + LOG.info("Inject fault START") + cmd = "sudo /bin/bash -s {0} {1} {2} {3}".format( + self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "off") + with open(self.inject_script, "r") as stdin_file: + if self.jump_connection is not None: + LOG.info("Power off node via IPMI") + self.jump_connection.execute(cmd, stdin=stdin_file) + else: + _execute_shell_command(cmd, stdin=stdin_file) + LOG.info("Inject fault END") def recover(self): - jump_host_name = self._config.get("jump_host", None) - self.jump_connection = None - if jump_host_name is not None: - host = self._context.get(jump_host_name, None) - - LOG.debug("jump_host ip:%s user:%s", host['ip'], host['user']) - self.jump_connection = ssh.SSH.from_node( - host, - # why do we allow pwd for password? - defaults={"user": "root", "password": host.get("pwd")} - ) - self.jump_connection.wait(timeout=600) - LOG.debug("ssh jump host success!") - - if self.jump_connection is not None: - with open(self.recovery_script, "r") as stdin_file: - self.jump_connection.execute( - "sudo /bin/bash -s {0} {1} {2} {3}".format( - self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"), - stdin=stdin_file) - else: - _execute_shell_command( - "sudo /bin/bash -s {0} {1} {2} {3}".format( - self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"), - stdin=open(self.recovery_script, "r")) + LOG.info("Recover fault START") + cmd = "sudo /bin/bash -s {0} {1} {2} {3}".format( + self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on") + with open(self.recovery_script, "r") as stdin_file: + if self.jump_connection is not None: + self.jump_connection.execute(cmd, stdin=stdin_file) + else: + _execute_shell_command(cmd, stdin=stdin_file) + LOG.info("Recover fault END") def _test(): # pragma: no cover diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml index ee7ea7d83..5f43a701a 100644 --- a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml +++ b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml @@ -23,6 +23,7 @@ kill-lxc-process: bare-metal-down: check_script: ha_tools/check_host_ping.bash + inject_script: ha_tools/ipmi_power.bash recovery_script: ha_tools/ipmi_power.bash stop-service: diff --git a/yardstick/benchmark/scenarios/networking/netperf.py b/yardstick/benchmark/scenarios/networking/netperf.py index 08d5dd166..a8d9010ed 100755 --- a/yardstick/benchmark/scenarios/networking/netperf.py +++ b/yardstick/benchmark/scenarios/networking/netperf.py @@ -114,6 +114,10 @@ class Netperf(base.Scenario): cmd_args += " %s %s" % (option_pair[1], options[option_pair[0]]) + # Enable IP routing for UDP_STREAM test + if 
testname == "UDP_STREAM": + cmd_args += " -R 1" + cmd = "sudo bash netperf.sh %s" % (cmd_args) LOG.debug("Executing command: %s", cmd) status, stdout, stderr = self.client.execute(cmd) diff --git a/yardstick/benchmark/scenarios/networking/netperf_install_arm64.patch b/yardstick/benchmark/scenarios/networking/netperf_install_arm64.patch new file mode 100644 index 000000000..b41c1d207 --- /dev/null +++ b/yardstick/benchmark/scenarios/networking/netperf_install_arm64.patch @@ -0,0 +1,42 @@ +diff --git a/yardstick/benchmark/scenarios/networking/netperf_install.bash b/yardstick/benchmark/scenarios/networking/netperf_install.bash +index 0e3808f..f9362eb 100755 +--- a/yardstick/benchmark/scenarios/networking/netperf_install.bash ++++ b/yardstick/benchmark/scenarios/networking/netperf_install.bash +@@ -1,9 +1,9 @@ + #!/bin/bash + + ############################################################################## +-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. ++# Copyright (c) 2017, Arm Limited. All rights reserved. + # +-# All rights reserved. This program and the accompanying materials ++# This program and the accompanying materials + # are made available under the terms of the Apache License, Version 2.0 + # which accompanies this distribution, and is available at + # http://www.apache.org/licenses/LICENSE-2.0 +@@ -19,21 +19,11 @@ then + fi + + echo "===Install netperf before test begin!!!===" +-cp /etc/apt/sources.list /etc/apt/sources.list_bkp +-cp /etc/resolv.conf /etc/resolv.conf_bkp +-echo "nameserver 8.8.4.4" >> /etc/resolv.conf +- +-cat <<EOF >/etc/apt/sources.list +-deb http://archive.ubuntu.com/ubuntu/ trusty main restricted universe multiverse +-deb http://archive.ubuntu.com/ubuntu/ trusty-security main restricted universe multiverse +-deb http://archive.ubuntu.com/ubuntu/ trusty-updates main restricted universe multiverse +-deb http://archive.ubuntu.com/ubuntu/ trusty-proposed main restricted universe multiverse +-deb http://archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe multiverse +-EOF +- +-sudo apt-get update +-sudo apt-get install -y netperf + ++apt-get update -y ++apt-get install -y wget ++wget http://launchpadlibrarian.net/155043952/netperf_2.6.0-2_arm64.deb ++dpkg -i ./netperf_2.6.0-2_arm64.deb + service netperf start + + echo "===Install netperf before test end!!!===" diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py index e4c232830..0cf7b9eab 100644 --- a/yardstick/common/kubernetes_utils.py +++ b/yardstick/common/kubernetes_utils.py @@ -28,6 +28,60 @@ def get_core_api(): # pragma: no cover return client.CoreV1Api() +def get_node_list(**kwargs): # pragma: no cover + core_v1_api = get_core_api() + try: + return core_v1_api.list_node(**kwargs) + except ApiException: + LOG.exception('Get node list failed') + raise + + +def create_service(template, + namespace='default', + wait=False, + **kwargs): # pragma: no cover + core_v1_api = get_core_api() + metadata = client.V1ObjectMeta(**template.get('metadata', {})) + + ports = [client.V1ServicePort(**port) for port in + template.get('spec', {}).get('ports', [])] + template['spec']['ports'] = ports + spec = client.V1ServiceSpec(**template.get('spec', {})) + + service = client.V1Service(metadata=metadata, spec=spec) + + try: + core_v1_api.create_namespaced_service('default', service) + except ApiException: + LOG.exception('Create Service failed') + raise + + +def delete_service(name, + namespace='default', + **kwargs): # pragma: no cover + core_v1_api = 
get_core_api() + try: + core_v1_api.delete_namespaced_service(name, namespace, **kwargs) + except ApiException: + LOG.exception('Delete Service failed') + + +def get_service_list(namespace='default', **kwargs): + core_v1_api = get_core_api() + try: + return core_v1_api.list_namespaced_service(namespace, **kwargs) + except ApiException: + LOG.exception('Get Service list failed') + raise + + +def get_service_by_name(name): # pragma: no cover + service_list = get_service_list() + return next((s.spec for s in service_list.items if s.metadata.name == name), None) + + def create_replication_controller(template, namespace='default', wait=False, @@ -135,3 +189,8 @@ def get_pod_list(namespace='default'): # pragma: no cover except ApiException: LOG.exception('Get pod list failed') raise + + +def get_pod_by_name(name): # pragma: no cover + pod_list = get_pod_list() + return next((n for n in pod_list.items if n.metadata.name.startswith(name)), None) diff --git a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py index 4b906508c..358e6e761 100644 --- a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py +++ b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py @@ -113,10 +113,10 @@ class IxNextgen(object): } MODE_SEEDS_MAP = { - 0: ('uplink_0', ['256', '2048']), + 0: ('uplink', ['256', '2048']), } - MODE_SEEDS_DEFAULT = 'downlink_0', ['2048', '256'] + MODE_SEEDS_DEFAULT = 'downlink', ['2048', '256'] @staticmethod def find_view_obj(view_name, views): @@ -125,24 +125,27 @@ class IxNextgen(object): @staticmethod def get_config(tg_cfg): + card = [] + port = [] external_interface = tg_cfg["vdu"][0]["external-interface"] - card_port0 = external_interface[0]["virtual-interface"]["vpci"] - card_port1 = external_interface[1]["virtual-interface"]["vpci"] - card0, port0 = card_port0.split(':')[:2] - card1, port1 = card_port1.split(':')[:2] + for intf in external_interface: + card_port0 = intf["virtual-interface"]["vpci"] + card0, port0 = card_port0.split(':')[:2] + card.append(card0) + port.append(port0) + cfg = { 'py_lib_path': tg_cfg["mgmt-interface"]["tg-config"]["py_lib_path"], 'machine': tg_cfg["mgmt-interface"]["ip"], 'port': tg_cfg["mgmt-interface"]["tg-config"]["tcl_port"], 'chassis': tg_cfg["mgmt-interface"]["tg-config"]["ixchassis"], - 'card1': card0, - 'port1': port0, - 'card2': card1, - 'port2': port1, + 'cards': card, + 'ports': port, 'output_dir': tg_cfg["mgmt-interface"]["tg-config"]["dut_result_dir"], 'version': tg_cfg["mgmt-interface"]["tg-config"]["version"], 'bidir': True, } + return cfg def __init__(self, ixnet=None): @@ -183,9 +186,13 @@ class IxNextgen(object): self.set_random_ip_multi_attribute(ip, seeds[1], fixed_bits, random_mask, l3_count) def add_ip_header(self, params, version): - for it, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement"): - mode, seeds = self.MODE_SEEDS_MAP.get(i, self.MODE_SEEDS_DEFAULT) - l3 = params[mode]['outer_l3'] + for it, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement", 1): + iter1 = (v['outer_l3'] for v in params.values() if str(v['id']) == str(i)) + try: + l3 = next(iter1, {}) + seeds = self.MODE_SEEDS_MAP.get(i, self.MODE_SEEDS_DEFAULT)[1] + except (KeyError, IndexError): + continue for ip, ip_bits, _ in self.iter_over_get_lists(ep, 'stack', 'field'): self.set_random_ip_multi_attributes(ip_bits, version, seeds, l3) @@ -222,10 +229,11 @@ class IxNextgen(object): def ix_assign_ports(self): vports = 
self.ixnet.getList(self.ixnet.getRoot(), 'vport') - ports = [ - (self._cfg['chassis'], self._cfg['card1'], self._cfg['port1']), - (self._cfg['chassis'], self._cfg['card2'], self._cfg['port2']), - ] + ports = [] + + chassis = self._cfg['chassis'] + ports = [(chassis, card, port) for card, port in + zip(self._cfg['cards'], self._cfg['ports'])] vport_list = self.ixnet.getList("/", "vport") self.ixnet.execute('assignPorts', ports, [], vport_list, True) @@ -276,10 +284,10 @@ class IxNextgen(object): def update_ether_multi_attributes(self, ether, l2): if "ethernet.header.destinationAddress" in ether: - self.update_ether_multi_attribute(ether, str(l2['dstmac'])) + self.update_ether_multi_attribute(ether, str(l2.get('dstmac', "00:00:00:00:00:02"))) if "ethernet.header.sourceAddress" in ether: - self.update_ether_multi_attribute(ether, str(l2['srcmac'])) + self.update_ether_multi_attribute(ether, str(l2.get('srcmac', "00:00:00:00:00:01"))) def ix_update_ether(self, params): for ti, ep, index in self.iter_over_get_lists('/traffic', 'trafficItem', diff --git a/yardstick/network_services/nfvi/collectd.conf b/yardstick/network_services/nfvi/collectd.conf index 3928dcbca..22bd5d49d 100644 --- a/yardstick/network_services/nfvi/collectd.conf +++ b/yardstick/network_services/nfvi/collectd.conf @@ -15,7 +15,7 @@ Hostname "nsb_stats" FQDNLookup true -Interval {interval} +Interval {{ interval }} ############################################################################## # LoadPlugin section # @@ -24,7 +24,9 @@ Interval {interval} ############################################################################## #LoadPlugin syslog -{loadplugin} +{% for plugin in loadplugins %} +LoadPlugin {{ plugin }} +{% endfor %} ############################################################################## # Plugin configuration # @@ -38,42 +40,31 @@ Interval {interval} #</Plugin> <Plugin amqp> - <Publish "name"> - Host "0.0.0.0" - Port "5672" - VHost "/" - User "admin" - Password "admin" - Exchange "amq.fanout" - RoutingKey "collectd" - Persistent false - StoreRates false - ConnectionRetryDelay 0 - </Publish> + <Publish "name"> + Host "0.0.0.0" + Port "5672" + VHost "/" + User "admin" + Password "admin" + Exchange "amq.fanout" + RoutingKey "collectd" + Persistent false + StoreRates false + ConnectionRetryDelay 0 + </Publish> </Plugin> <Plugin cpu> - ReportByCpu true - ReportByState true - ValuesPercentage true + ReportByCpu true + ReportByState true + ValuesPercentage true </Plugin> <Plugin memory> - ValuesAbsolute true - ValuesPercentage false -</Plugin> - -<Plugin "intel_rdt"> - Cores "" + ValuesAbsolute true + ValuesPercentage false </Plugin> -<Plugin intel_pmu> - ReportHardwareCacheEvents true - ReportKernelPMUEvents true - ReportSoftwareEvents true - EventList "/root/.cache/pmu-events/GenuineIntel-6-2D-core.json" - HardwareEvents "L2_RQSTS.CODE_RD_HIT,L2_RQSTS.CODE_RD_MISS" "L2_RQSTS.ALL_CODE_RD" -</Plugin> <Plugin hugepages> ReportPerNodeHP true @@ -83,15 +74,25 @@ Interval {interval} ValuesPercentage false </Plugin> -<Plugin hugepages> - ReportPerNodeHP true - ReportRootHP true - ValuesPages true - ValuesBytes false - ValuesPercentage false + +{% if "intel_rdt" in plugins %} +<Plugin "intel_rdt"> + Cores "" +</Plugin> +{% endif %} + +{% if "intel_pmu" in plugins %} +<Plugin intel_pmu> + ReportHardwareCacheEvents true + ReportKernelPMUEvents true + ReportSoftwareEvents true + EventList "/root/.cache/pmu-events/GenuineIntel-6-2D-core.json" + HardwareEvents "L2_RQSTS.CODE_RD_HIT,L2_RQSTS.CODE_RD_MISS" 
"L2_RQSTS.ALL_CODE_RD" </Plugin> +{% endif %} -<Plugin dpdkstat> +{% if "dpdkstat" in plugins %} +<Plugin "dpdkstat"> <EAL> Coremask "0x1" MemoryChannels "4" @@ -100,20 +101,24 @@ Interval {interval} </EAL> SharedMemObj "dpdk_collectd_stats_0" EnabledPortMask 0xffff - {dpdk_interface} +{% for port_name in port_names %} + PortName {{ port_name }} +{% endfor %} </Plugin> +{% endif %} -<Plugin virt> - Domain "samplevnf" +{% if "virt" in plugins %} +<Plugin "virt"> +# monitor all domains </Plugin> +{% endif %} -<Plugin ovs_stats> +{% if "ovs_stats" in plugins %} +<Plugin "ovs_stats"> Port "6640" Address "127.0.0.1" Socket "/usr/local/var/run/openvswitch/db.sock" - Bridges "br0" "br_ext" +# don't specify bridges, monitor all bridges </Plugin> +{% endif %} -<Include "/etc/collectd/collectd.conf.d"> - Filter "*.conf" -</Include> diff --git a/yardstick/network_services/nfvi/collectd.sh b/yardstick/network_services/nfvi/collectd.sh index 296c4a213..bdc5abd03 100755 --- a/yardstick/network_services/nfvi/collectd.sh +++ b/yardstick/network_services/nfvi/collectd.sh @@ -142,7 +142,8 @@ else fi modprobe msr -cp $INSTALL_NSB_BIN/collectd.conf /opt/collectd/etc/ +# we overwrite the config file during _start_collectd so don't copy it +#cp $INSTALL_NSB_BIN/collectd.conf /opt/nsb_bin/collectd/etc/ sudo service rabbitmq-server restart echo "Check if admin user already created" rabbitmqctl list_users | grep '^admin$' > /dev/null diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py index fa32a4dcf..d807f5e46 100644 --- a/yardstick/network_services/nfvi/resource.py +++ b/yardstick/network_services/nfvi/resource.py @@ -15,16 +15,22 @@ from __future__ import absolute_import from __future__ import print_function -import tempfile + import logging +from itertools import chain + +import jinja2 import os import os.path import re import multiprocessing +import pkg_resources from oslo_config import cfg +from oslo_utils.encodeutils import safe_decode from yardstick import ssh +from yardstick.common.task_template import finalize_for_yaml from yardstick.common.utils import validate_non_string_sequence from yardstick.network_services.nfvi.collectd import AmqpConsumer from yardstick.network_services.utils import get_nsb_option @@ -34,26 +40,36 @@ LOG = logging.getLogger(__name__) CONF = cfg.CONF ZMQ_OVS_PORT = 5567 ZMQ_POLLING_TIME = 12000 -LIST_PLUGINS_ENABLED = ["amqp", "cpu", "cpufreq", "intel_rdt", "memory", - "hugepages", "dpdkstat", "virt", "ovs_stats", "intel_pmu"] +LIST_PLUGINS_ENABLED = ["amqp", "cpu", "cpufreq", "memory", + "hugepages"] class ResourceProfile(object): """ This profile adds a resource at the beginning of the test session """ + COLLECTD_CONF = "collectd.conf" + AMPQ_PORT = 5672 + DEFAULT_INTERVAL = 25 - def __init__(self, mgmt, interfaces=None, cores=None): + def __init__(self, mgmt, port_names=None, cores=None, plugins=None, interval=None): + if plugins is None: + self.plugins = {} + else: + self.plugins = plugins + if interval is None: + self.interval = self.DEFAULT_INTERVAL + else: + self.interval = interval self.enable = True self.cores = validate_non_string_sequence(cores, default=[]) self._queue = multiprocessing.Queue() self.amqp_client = None - self.interfaces = validate_non_string_sequence(interfaces, default=[]) + self.port_names = validate_non_string_sequence(port_names, default=[]) - # why the host or ip? 
- self.vnfip = mgmt.get("host", mgmt["ip"]) - self.connection = ssh.SSH.from_node(mgmt, overrides={"ip": self.vnfip}) - self.connection.wait() + # we need to save mgmt so we can connect to port 5672 + self.mgmt = mgmt + self.connection = ssh.AutoConnectSSH.from_node(mgmt) def check_if_sa_running(self, process): """ verify if system agent is running """ @@ -62,7 +78,7 @@ class ResourceProfile(object): def run_collectd_amqp(self): """ run amqp consumer to collect the NFVi data """ - amqp_url = 'amqp://admin:admin@{}:5672/%2F'.format(self.vnfip) + amqp_url = 'amqp://admin:admin@{}:{}/%2F'.format(self.mgmt['ip'], self.AMPQ_PORT) amqp = AmqpConsumer(amqp_url, self._queue) try: amqp.run() @@ -124,7 +140,9 @@ class ResourceProfile(object): } testcase = "" - for key, value in metrics.items(): + # unicode decode + decoded = ((safe_decode(k, 'utf-8'), safe_decode(v, 'utf-8')) for k, v in metrics.items()) + for key, value in decoded: key_split = key.split("/") res_key_iter = (key for key in key_split if "nsb_stats" not in key) res_key0 = next(res_key_iter) @@ -176,35 +194,36 @@ class ResourceProfile(object): msg = self.parse_collectd_result(metric, self.cores) return msg - def _provide_config_file(self, bin_path, nfvi_cfg, kwargs): - with open(os.path.join(bin_path, nfvi_cfg), 'r') as cfg: - template = cfg.read() - cfg, cfg_content = tempfile.mkstemp() - with os.fdopen(cfg, "w+") as cfg: - cfg.write(template.format(**kwargs)) - cfg_file = os.path.join(bin_path, nfvi_cfg) - self.connection.put(cfg_content, cfg_file) - - def _prepare_collectd_conf(self, bin_path): + def _provide_config_file(self, config_file_path, nfvi_cfg, template_kwargs): + template = pkg_resources.resource_string("yardstick.network_services.nfvi", + nfvi_cfg).decode('utf-8') + cfg_content = jinja2.Template(template, trim_blocks=True, lstrip_blocks=True, + finalize=finalize_for_yaml).render( + **template_kwargs) + # cfg_content = io.StringIO(template.format(**template_kwargs)) + cfg_file = os.path.join(config_file_path, nfvi_cfg) + # must write as root, so use sudo + self.connection.execute("cat | sudo tee {}".format(cfg_file), stdin=cfg_content) + + def _prepare_collectd_conf(self, config_file_path): """ Prepare collectd conf """ - loadplugin = "\n".join("LoadPlugin {0}".format(plugin) - for plugin in LIST_PLUGINS_ENABLED) - - interfaces = "\n".join("PortName '{0[name]}'".format(interface) - for interface in self.interfaces) kwargs = { - "interval": '25', - "loadplugin": loadplugin, - "dpdk_interface": interfaces, + "interval": self.interval, + "loadplugins": set(chain(LIST_PLUGINS_ENABLED, self.plugins.keys())), + # Optional fields PortName is descriptive only, use whatever is present + "port_names": self.port_names, + # "ovs_bridge_interfaces": ["br-int"], + "plugins": self.plugins, } - self._provide_config_file(bin_path, 'collectd.conf', kwargs) + self._provide_config_file(config_file_path, self.COLLECTD_CONF, kwargs) def _start_collectd(self, connection, bin_path): LOG.debug("Starting collectd to collect NFVi stats") - connection.execute('sudo pkill -9 collectd') + connection.execute('sudo pkill -x -9 collectd') bin_path = get_nsb_option("bin_path") - collectd_path = os.path.join(bin_path, "collectd", "collectd") + collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd") + config_file_path = os.path.join(bin_path, "collectd", "etc") exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0] if exit_status != 0: LOG.warning("%s is not present disabling", collectd_path) @@ -217,7 +236,9 @@ 
class ResourceProfile(object): # collectd_installer, http_proxy, https_proxy)) return LOG.debug("Starting collectd to collect NFVi stats") - self._prepare_collectd_conf(bin_path) + # ensure collectd.conf.d exists to avoid error/warning + connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d") + self._prepare_collectd_conf(config_file_path) # Reset amqp queue LOG.debug("reset and setup amqp to collect data from collectd") @@ -228,7 +249,7 @@ class ResourceProfile(object): connection.execute("sudo rabbitmqctl start_app") connection.execute("sudo service rabbitmq-server restart") - LOG.debug("Creating amdin user for rabbitmq in order to collect data from collectd") + LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd") connection.execute("sudo rabbitmqctl delete_user guest") connection.execute("sudo rabbitmqctl add_user admin admin") connection.execute("sudo rabbitmqctl authenticate_user admin admin") @@ -241,7 +262,11 @@ class ResourceProfile(object): def initiate_systemagent(self, bin_path): """ Start system agent for NFVi collection on host """ if self.enable: - self._start_collectd(self.connection, bin_path) + try: + self._start_collectd(self.connection, bin_path) + except Exception: + LOG.exception("Exception during collectd start") + raise def start(self): """ start nfvi collection """ diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py index ee58172d8..28480b8e9 100644 --- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py +++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py @@ -43,35 +43,39 @@ class IXIARFC2544Profile(TrexProfile): traffic = static_traffic[traffickey] # outer_l2 index = 0 - for key, value in profile_data[traffickey].items(): - framesize = value['outer_l2']['framesize'] - traffic['outer_l2']['framesize'] = framesize - traffic['framesPerSecond'] = True - traffic['bidir'] = False - traffic['outer_l2']['srcmac'] = \ - mac["src_mac_{}".format(traffic['id'])] - traffic['outer_l2']['dstmac'] = \ - mac["dst_mac_{}".format(traffic['id'])] - - # outer_l3 - if "outer_l3v6" in list(value.keys()): - traffic['outer_l3'] = value['outer_l3v6'] - srcip4 = value['outer_l3v6']['srcip6'] - traffic['outer_l3']['srcip4'] = srcip4.split("-")[0] - dstip4 = value['outer_l3v6']['dstip6'] - traffic['outer_l3']['dstip4'] = dstip4.split("-")[0] - else: - traffic['outer_l3'] = value['outer_l3v4'] - srcip4 = value['outer_l3v4']['srcip4'] - traffic['outer_l3']['srcip4'] = srcip4.split("-")[0] - dstip4 = value['outer_l3v4']['dstip4'] - traffic['outer_l3']['dstip4'] = dstip4.split("-")[0] - - traffic['outer_l3']['type'] = key - traffic['outer_l3']['count'] = value['outer_l3v4']['count'] - # outer_l4 - traffic['outer_l4'] = value['outer_l4'] - index = index + 1 + try: + for key, value in profile_data[traffickey].items(): + framesize = value['outer_l2']['framesize'] + traffic['outer_l2']['framesize'] = framesize + traffic['framesPerSecond'] = True + traffic['bidir'] = False + traffic['outer_l2']['srcmac'] = \ + mac["src_mac_{}".format(traffic['id'])] + traffic['outer_l2']['dstmac'] = \ + mac["dst_mac_{}".format(traffic['id'])] + + # outer_l3 + if "outer_l3v6" in list(value.keys()): + traffic['outer_l3'] = value['outer_l3v6'] + srcip4 = value['outer_l3v6']['srcip6'] + traffic['outer_l3']['srcip4'] = srcip4.split("-")[0] + dstip4 = value['outer_l3v6']['dstip6'] + traffic['outer_l3']['dstip4'] = dstip4.split("-")[0] + else: + traffic['outer_l3'] = 
value['outer_l3v4'] + srcip4 = value['outer_l3v4']['srcip4'] + traffic['outer_l3']['srcip4'] = srcip4.split("-")[0] + dstip4 = value['outer_l3v4']['dstip4'] + traffic['outer_l3']['dstip4'] = dstip4.split("-")[0] + + traffic['outer_l3']['type'] = key + traffic['outer_l3']['count'] = value['outer_l3v4']['count'] + # outer_l4 + traffic['outer_l4'] = value['outer_l4'] + index = index + 1 + except Exception: + continue + result.update({traffickey: traffic}) return result diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py index cccc0652d..67634a79c 100644 --- a/yardstick/network_services/vnf_generic/vnf/base.py +++ b/yardstick/network_services/vnf_generic/vnf/base.py @@ -106,15 +106,18 @@ class VnfdHelper(dict): if int(virtual_intf['dpdk_port_num']) == port: return interface - def port_num(self, name): + def port_num(self, port): # we need interface name -> DPDK port num (PMD ID) -> LINK ID # LINK ID -> PMD ID is governed by the port mask """ :rtype: int - :type name: str + :type port: str """ - intf = self.find_interface(name=name) + if isinstance(port, dict): + intf = port + else: + intf = self.find_interface(name=port) return int(intf["virtual-interface"]["dpdk_port_num"]) def port_nums(self, intfs): diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py index 557009d30..91530860e 100644 --- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py @@ -282,9 +282,11 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): def setup_vnf_environment(self): self._setup_dpdk() - resource = self._setup_resources() + self.bound_pci = [v['virtual-interface']["vpci"] for v in self.vnfd_helper.interfaces] self.kill_vnf() + # bind before _setup_resources so we can use dpdk_port_num self._detect_and_bind_drivers() + resource = self._setup_resources() return resource def kill_vnf(self): @@ -307,10 +309,13 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): if exit_status != 0: self.ssh_helper.execute("bash %s dpdk >/dev/null 2>&1" % dpdk_setup) - def _setup_resources(self): - interfaces = self.vnfd_helper.interfaces - self.bound_pci = [v['virtual-interface']["vpci"] for v in interfaces] + def get_collectd_options(self): + options = self.scenario_helper.all_options.get("collectd", {}) + # override with specific node settings + options.update(self.scenario_helper.options.get("collectd", {})) + return options + def _setup_resources(self): # what is this magic? how do we know which socket is for which port? # what about quad-socket? 
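get_collectd_options() layers a node-specific collectd block over the scenario-wide one, and _setup_resources() below feeds the resulting plugins and interval into ResourceProfile. A small sketch of that merge follows; the scenario_options dict and the vnf__0 key are hypothetical stand-ins for what a task file might provide, and only the two-level "collectd" lookup and the update() override are taken from this change.

# Hypothetical options as they might arrive from a task file: scenario-wide
# collectd settings plus a per-VNF override block.
scenario_options = {
    "collectd": {"interval": 10, "plugins": {"dpdkstat": {}, "virt": {}}},
    "vnf__0": {"collectd": {"interval": 25, "plugins": {"ovs_stats": {}}}},
}

def get_collectd_options(all_options, node_options):
    """Scenario-wide 'collectd' settings, overridden by the node-specific block."""
    options = dict(all_options.get("collectd", {}))
    options.update(node_options.get("collectd", {}))
    return options

opts = get_collectd_options(scenario_options, scenario_options["vnf__0"])
print(opts.get("interval"), sorted(opts.get("plugins", {})))
# 25 ['ovs_stats'] -- update() is shallow, so the node block replaces the
# whole plugins mapping rather than merging into it.

The shallow override matters when a node only wants to add one plugin: it has to restate the full plugins mapping rather than extend the scenario-wide one.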
if any(v[5] == "0" for v in self.bound_pci): @@ -319,8 +324,14 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): self.socket = 1 cores = self._validate_cpu_cfg() - return ResourceProfile(self.vnfd_helper.mgmt_interface, - interfaces=self.vnfd_helper.interfaces, cores=cores) + # implicit ordering, presumably by DPDK port num, so pre-sort by port_num + # this won't work because we don't have DPDK port numbers yet + ports = sorted(self.vnfd_helper.interfaces, key=self.vnfd_helper.port_num) + port_names = (intf["name"] for intf in ports) + collectd_options = self.get_collectd_options() + plugins = collectd_options.get("plugins", {}) + return ResourceProfile(self.vnfd_helper.mgmt_interface, port_names=port_names, cores=cores, + plugins=plugins, interval=collectd_options.get("interval")) def _detect_and_bind_drivers(self): interfaces = self.vnfd_helper.interfaces diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py index cd9553d12..22aaf6dfb 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py @@ -81,11 +81,13 @@ class IxiaResourceHelper(ClientResourceHelper): latency = stats[0] samples = {} - for port_name in ports: + # this is not DPDK port num, but this is whatever number we gave + # when we selected ports and programmed the profile + for port_num in ports: try: - # this is not DPDK port num, but this is whatever number we gave - # when we selected ports and programmed the profile - port_num = self.vnfd_helper.port_num(port_name) + # reverse lookup port name from port_num so the stats dict is descriptive + intf = self.vnfd_helper.find_interface_by_port(port_num) + port_name = intf["name"] samples[port_name] = { "rx_throughput_kps": float(last_result["Rx_Rate_Kbps"][port_num]), "tx_throughput_kps": float(last_result["Tx_Rate_Kbps"][port_num]), @@ -140,9 +142,9 @@ class IxiaResourceHelper(ClientResourceHelper): mac = {} for vld_id, traffic in static_traffic.items(): intfs = self.vnfd_helper.port_pairs.networks.get(vld_id, []) - interface = next(intfs, None) + interface = next(iter(intfs), None) if interface: - virt_intf = interface["virtual-interface"] + virt_intf = self.vnfd_helper.find_interface(name=interface)["virtual-interface"] # we only know static traffic id by reading the json # this is used by _get_ixia_traffic_profile mac["src_mac_{}".format(traffic["id"])] = virt_intf.get("local_mac", default) @@ -168,24 +170,25 @@ class IxiaResourceHelper(ClientResourceHelper): self.client.ix_stop_traffic() self._queue.put(samples) - except Exception: - LOG.info("Run Traffic terminated") - if not self.rfc_helper.is_done(): - self._terminated.value = 1 - return + if not self.rfc_helper.is_done(): + self._terminated.value = 1 + return + + traffic_profile.execute_traffic(self, self.client, mac, ixia_file) + for _ in range(5): + time.sleep(self.LATENCY_TIME_SLEEP) + self.client.ix_stop_traffic() + samples = self.generate_samples(traffic_profile.ports, 'latency', {}) + self._queue.put(samples) + traffic_profile.start_ixia_latency(self, self.client, mac, ixia_file) + if self._terminated.value: + break - traffic_profile.execute_traffic(self, self.client, mac, ixia_file) - for _ in range(5): - time.sleep(self.LATENCY_TIME_SLEEP) self.client.ix_stop_traffic() - samples = self.generate_samples(traffic_profile.ports, 'latency', {}) - self._queue.put(samples) - traffic_profile.start_ixia_latency(self, self.client, mac, ixia_file) - if 
self._terminated.value: - break + except Exception: + LOG.exception("Run Traffic terminated") - self.client.ix_stop_traffic() self._terminated.value = 1 def collect_kpi(self): diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py index cd4a008ce..5f1c4d4d3 100644 --- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py @@ -50,13 +50,14 @@ class ConfigCreate(object): config.set(tm_q, 'cfg', '/tmp/full_tm_profile_10G.cfg') return config - def __init__(self, uplink_ports, downlink_ports, socket): + def __init__(self, vnfd_helper, socket): super(ConfigCreate, self).__init__() self.sw_q = -1 self.sink_q = -1 self.n_pipeline = 1 - self.uplink_ports = uplink_ports - self.downlink_ports = downlink_ports + self.vnfd_helper = vnfd_helper + self.uplink_ports = self.vnfd_helper.port_pairs.uplink_ports + self.downlink_ports = self.vnfd_helper.port_pairs.downlink_ports self.pipeline_per_port = 9 self.socket = socket @@ -77,7 +78,7 @@ class ConfigCreate(object): def vpe_rxq(self, config): for port in self.downlink_ports: - new_section = 'RXQ{0}.0'.format(port) + new_section = 'RXQ{0}.0'.format(self.vnfd_helper.port_num(port)) config.add_section(new_section) config.set(new_section, 'mempool', 'MEMPOOL1') @@ -102,7 +103,8 @@ class ConfigCreate(object): for k, v in parser.items(pipeline): if k == "pktq_in": if "RXQ" in v: - value = "RXQ{0}.0".format(self.uplink_ports[index]) + port = self.vnfd_helper.port_num(self.uplink_ports[index]) + value = "RXQ{0}.0".format(port) else: value = self.get_sink_swq(parser, pipeline, k, index) @@ -110,7 +112,8 @@ class ConfigCreate(object): elif k == "pktq_out": if "TXQ" in v: - value = "TXQ{0}.0".format(self.downlink_ports[index]) + port = self.vnfd_helper.port_num(self.downlink_ports[index]) + value = "TXQ{0}.0".format(port) else: self.sw_q += 1 value = self.get_sink_swq(parser, pipeline, k, index) @@ -131,23 +134,25 @@ class ConfigCreate(object): for k, v in parser.items(pipeline): if k == "pktq_in": + port = self.vnfd_helper.port_num(self.downlink_ports[index]) if "RXQ" not in v: value = self.get_sink_swq(parser, pipeline, k, index) elif "TM" in v: - value = "RXQ{0}.0 TM{1}".format(self.downlink_ports[index], index) + value = "RXQ{0}.0 TM{1}".format(port, index) else: - value = "RXQ{0}.0".format(self.downlink_ports[index]) + value = "RXQ{0}.0".format(port) parser.set(pipeline, k, value) if k == "pktq_out": + port = self.vnfd_helper.port_num(self.uplink_ports[index]) if "TXQ" not in v: self.sw_q += 1 value = self.get_sink_swq(parser, pipeline, k, index) elif "TM" in v: - value = "TXQ{0}.0 TM{1}".format(self.uplink_ports[index], index) + value = "TXQ{0}.0 TM{1}".format(port, index) else: - value = "TXQ{0}.0".format(self.uplink_ports[index]) + value = "TXQ{0}.0".format(port) parser.set(pipeline, k, value) @@ -174,14 +179,19 @@ class ConfigCreate(object): def generate_vpe_script(self, interfaces): rules = PipelineRules(pipeline_id=1) - for priv_port, pub_port in zip(self.uplink_ports, self.downlink_ports): - priv_intf = interfaces[priv_port]["virtual-interface"] - pub_intf = interfaces[pub_port]["virtual-interface"] + for uplink_port, downlink_port in zip(self.uplink_ports, self.downlink_ports): - dst_port0_ip = priv_intf["dst_ip"] - dst_port1_ip = pub_intf["dst_ip"] - dst_port0_mac = priv_intf["dst_mac"] - dst_port1_mac = pub_intf["dst_mac"] + uplink_intf = \ + next(intf["virtual-interface"] for intf in interfaces + if intf["name"] == 
uplink_port) + downlink_intf = \ + next(intf["virtual-interface"] for intf in interfaces + if intf["name"] == downlink_port) + + dst_port0_ip = uplink_intf["dst_ip"] + dst_port1_ip = downlink_intf["dst_ip"] + dst_port0_mac = uplink_intf["dst_mac"] + dst_port1_mac = downlink_intf["dst_mac"] rules.add_firewall_script(dst_port0_ip) rules.next_pipeline() @@ -226,8 +236,7 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper): } self._build_vnf_ports() - vpe_conf = ConfigCreate(self.vnfd_helper.port_pairs.uplink_ports, - self.vnfd_helper.port_pairs.downlink_ports, self.socket) + vpe_conf = ConfigCreate(self.vnfd_helper, self.socket) vpe_conf.create_vpe_config(self.scenario_helper.vnf_cfg) config_basename = posixpath.basename(self.CFG_CONFIG) diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py index 6d7045f58..198eeac6d 100644 --- a/yardstick/orchestrator/kubernetes.py +++ b/yardstick/orchestrator/kubernetes.py @@ -23,6 +23,7 @@ class KubernetesObject(object): self.command = [kwargs.get('command', '/bin/bash')] self.args = kwargs.get('args', []) self.ssh_key = kwargs.get('ssh_key', 'yardstick_key') + self.node_selector = kwargs.get('nodeSelector', {}) self.volumes = [] @@ -37,12 +38,13 @@ class KubernetesObject(object): "template": { "metadata": { "labels": { - "app": "" + "app": name } }, "spec": { "containers": [], - "volumes": [] + "volumes": [], + "nodeSelector": {} } } } @@ -50,6 +52,7 @@ class KubernetesObject(object): self._change_value_according_name(name) self._add_containers() + self._add_node_selector() self._add_ssh_key_volume() self._add_volumes() @@ -88,6 +91,11 @@ class KubernetesObject(object): return container + def _add_node_selector(self): + utils.set_dict_value(self.template, + 'spec.template.spec.nodeSelector', + self.node_selector) + def _add_volumes(self): utils.set_dict_value(self.template, 'spec.template.spec.volumes', @@ -106,6 +114,35 @@ class KubernetesObject(object): self._add_volume(key_volume) +class ServiceObject(object): + + def __init__(self, name): + self.name = '{}-service'.format(name) + self.template = { + 'metadata': { + 'name': '{}-service'.format(name) + }, + 'spec': { + 'type': 'NodePort', + 'ports': [ + { + 'port': 22, + 'protocol': 'TCP' + } + ], + 'selector': { + 'app': name + } + } + } + + def create(self): + k8s_utils.create_service(self.template) + + def delete(self): + k8s_utils.delete_service(self.name) + + class KubernetesTemplate(object): def __init__(self, name, template_cfg): @@ -117,6 +154,8 @@ class KubernetesTemplate(object): ssh_key=self.ssh_key, **cfg) for rc, cfg in template_cfg.items()] + self.service_objs = [ServiceObject(s) for s in self.rcs] + self.pods = [] def _get_rc_name(self, rc_name): |
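Taken together, the ServiceObject template and kubernetes_utils.create_service() expose each replication controller's SSH port through a NodePort service. A minimal sketch of that flow with the kubernetes Python client is below; the function name, the 'vnf-rc' example and the explicit load_kube_config() call are illustrative, while the template layout and the V1ObjectMeta/V1ServicePort/V1ServiceSpec construction mirror the helpers added in this change.

from kubernetes import client, config

def create_ssh_nodeport_service(name, namespace='default'):
    """Build the NodePort service described by the ServiceObject template and
    create it through CoreV1Api, roughly as kubernetes_utils.create_service() does."""
    template = {
        'metadata': {'name': '{}-service'.format(name)},
        'spec': {
            'type': 'NodePort',
            'ports': [{'port': 22, 'protocol': 'TCP'}],
            'selector': {'app': name},
        },
    }

    config.load_kube_config()  # illustrative; the helpers in this patch go through get_core_api() instead
    core_v1_api = client.CoreV1Api()

    metadata = client.V1ObjectMeta(**template['metadata'])
    ports = [client.V1ServicePort(**port) for port in template['spec']['ports']]
    spec = client.V1ServiceSpec(type=template['spec']['type'],
                                ports=ports,
                                selector=template['spec']['selector'])
    core_v1_api.create_namespaced_service(namespace,
                                          client.V1Service(metadata=metadata, spec=spec))

create_ssh_nodeport_service('vnf-rc')

Because the service selects on the app label that KubernetesObject now sets to the RC name, the pod behind each replication controller becomes reachable over SSH on a node port without knowing its pod IP, which is what KubernetesTemplate relies on when it builds one ServiceObject per RC.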