47 files changed, 1143 insertions, 350 deletions
diff --git a/docker/Dockerfile b/docker/Dockerfile index 709db317e..95ec1857e 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -47,8 +47,8 @@ ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-d COPY ./exec_tests.sh /usr/local/bin/ -ENV NSB_DIR="/opt/nsb_bin" \ - PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl" +ENV NSB_DIR="/opt/nsb_bin" +ENV PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl" WORKDIR ${REPOS_DIR} CMD ["/usr/bin/supervisord"] diff --git a/docs/testing/user/userguide/14-nsb-operation.rst b/docs/testing/user/userguide/14-nsb-operation.rst index d157914a9..851c6528e 100644 --- a/docs/testing/user/userguide/14-nsb-operation.rst +++ b/docs/testing/user/userguide/14-nsb-operation.rst @@ -84,6 +84,116 @@ In this example we have ``TRex xe0 <-> xe0 VNF xe1 <-> xe0 UDP_Replay`` downlink_0: - xe0 + +Availability zone +^^^^^^^^^^^^^^^^^ + +The configuration of the availability zone is required in cases where the location +of an exact compute host/group of compute hosts needs to be specified for the SampleVNF +or traffic generator in the heat test case. If this is the case, please follow +the instructions below. + +.. _`Create a host aggregate`: + +1. Create a host aggregate in OpenStack and add the available compute hosts + into the aggregate group. + + .. note:: Change the ``<AZ_NAME>`` (availability zone name), ``<AGG_NAME>`` + (host aggregate name) and ``<HOST>`` (host name of one of the compute hosts) in the + commands below. + + .. code-block:: bash + + # create host aggregate + openstack aggregate create --zone <AZ_NAME> --property availability_zone=<AZ_NAME> <AGG_NAME> + # show available hosts + openstack compute service list --service nova-compute + # add selected host into the host aggregate + openstack aggregate add host <AGG_NAME> <HOST> + +2. To specify the OpenStack location (the exact compute host or group of hosts) + of the SampleVNF or traffic generator in the heat test case, the ``availability_zone`` server + configuration option should be used. For example: + + .. note:: The ``<AZ_NAME>`` (availability zone name) should be changed according + to the name used during the host aggregate creation steps above. + + .. code-block:: yaml + + context: + name: yardstick + image: yardstick-samplevnfs + ... + servers: + vnf__0: + ... + availability_zone: <AZ_NAME> + ... + tg__0: + ... + availability_zone: <AZ_NAME> + ... + networks: + ... + +There are two examples of SampleVNF scale-out test cases which use the availability zone +feature to specify the exact location of scaled VNFs and traffic generators. + +Those are: + +.. code-block:: console + + <repo>/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml + <repo>/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_out.yaml + +.. note:: This section describes the PROX scale-out test case, but the same + procedure is used for the vFW test case. + +1. Before running the scale-out test case, make sure the host aggregates are + configured in the OpenStack environment. To check this, run the following + command: + + .. code-block:: console + + # show configured host aggregates (example) + openstack aggregate list + +----+------+-------------------+ + | ID | Name | Availability Zone | + +----+------+-------------------+ + | 4 | agg0 | AZ_NAME_0 | + | 5 | agg1 | AZ_NAME_1 | + +----+------+-------------------+ + +2. If no host aggregates are configured, please use `steps above`__ to + configure them.
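The host aggregate check from step 1 can also be scripted before a test run. A minimal sketch, assuming the ``openstack`` CLI is installed and that ``AZ_NAME_0``/``AZ_NAME_1`` are the zones the test case expects (hypothetical names):

.. code-block:: python

    import json
    import subprocess

    # Availability zones referenced by the scale-out task arguments
    # (hypothetical names, adjust to your deployment).
    EXPECTED_ZONES = {"AZ_NAME_0", "AZ_NAME_1"}

    # "openstack aggregate list -f json" prints one object per aggregate;
    # the zone column is assumed to be exposed as "Availability Zone".
    output = subprocess.check_output(
        ["openstack", "aggregate", "list", "-f", "json"])
    zones = {agg.get("Availability Zone") for agg in json.loads(output)}

    missing = EXPECTED_ZONES - zones
    if missing:
        raise SystemExit("Missing availability zones: %s" % ", ".join(sorted(missing)))
    print("All expected availability zones are configured.")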
+ +__ `Create a host aggregate`_ + + +3. Run the SampleVNF PROX scale-out test case, specifying the availability + zone of each VNF and traffic generator as task arguments. + + .. note:: The ``az_0`` and ``az_1`` should be changed according to the host + aggregates created in OpenStack. + + .. code-block:: console + + yardstick -d task start\ + <repo>/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml\ + --task-args='{ + "num_vnfs": 4, "availability_zone": { + "vnf_0": "az_0", "tg_0": "az_1", + "vnf_1": "az_0", "tg_1": "az_1", + "vnf_2": "az_0", "tg_2": "az_1", + "vnf_3": "az_0", "tg_3": "az_1" + } + }' + + ``num_vnfs`` specifies how many VNFs are going to be deployed in the + ``heat`` contexts. ``vnf_X`` and ``tg_X`` arguments configure the + availability zone where the VNF and traffic generator are going to be deployed. + + Collectd KPIs ------------- @@ -242,7 +352,7 @@ Baremetal file: /etc/yardstick/nodes/pod.yaml Scale-Out --------------------- +--------- VNFs performance data with scale-out helps diff --git a/requirements.txt b/requirements.txt index 02545de1d..cd795fdd9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,6 +14,7 @@ PTable==0.9.2 # BSD (3 clause); OSI Approved BSD License ansible==2.4.2 # GPLv3; OSI Approved GNU General Public License v3 or later (GPLv3+) backport-ipaddress==0.1; python_version <= "2.7" # OSI Approved Python Software Foundation License chainmap==1.0.2 # Python Software Foundation License; OSI Approved Python Software Foundation License +cmd2==0.8.6 # MIT License; OSI Approved MIT License django==1.8.17 # BSD; OSI Approved BSD License # NOTE(ralonsoh): django must be bumped to 1.11.8; consider the migration notes [1] # [1] https://docs.djangoproject.com/ja/1.11/ref/templates/upgrading/ @@ -37,6 +38,7 @@ os-client-config==1.28.0 # OSI Approved Apache Software License osc-lib==1.7.0 # OSI Approved Apache Software License oslo.config==4.11.1 # OSI Approved Apache Software License oslo.i18n==3.17.0 # OSI Approved Apache Software License +oslo.messaging===5.36.0 # OSI Approved Apache Software License oslo.privsep===1.22.1 # OSI Approved Apache Software License oslo.serialization==2.20.1 # OSI Approved Apache Software License oslo.utils==3.28.0 # OSI Approved Apache Software License diff --git a/samples/vnf_samples/nsut/prox/prox-tg-topology-scale-out.yaml b/samples/vnf_samples/nsut/prox/prox-tg-topology-scale-out.yaml new file mode 100644 index 000000000..5f01ecb7e --- /dev/null +++ b/samples/vnf_samples/nsut/prox/prox-tg-topology-scale-out.yaml @@ -0,0 +1,53 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+{% set num_vnfs = get(extra_args, 'num_vnfs', 1) %} +--- +nsd:nsd-catalog: + nsd: + - id: prox-tg-topology + name: prox-tg-topology + short-name: prox-tg-topology + description: prox-tg-topology + constituent-vnfd: +{% for vnf_num in range(num_vnfs|int) %} + - member-vnf-index: '{{ (vnf_num * 2) + 1 }}' + vnfd-id-ref: tg__{{ vnf_num }} + VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml + - member-vnf-index: '{{ (vnf_num * 2) + 2 }}' + vnfd-id-ref: vnf__{{ vnf_num }} + VNF model: ../../vnf_descriptors/prox_vnf.yaml +{% endfor %} + vld: +{% for vnf_num in range(num_vnfs|int) %} + - id: uplink_{{ vnf_num }} + name: tg__{{ vnf_num }} to vnf__{{ vnf_num }} link {{ (vnf_num * 2) + 1 }} + type: ELAN + vnfd-connection-point-ref: + - member-vnf-index-ref: '{{ (vnf_num * 2) + 1 }}' + vnfd-connection-point-ref: xe0 + vnfd-id-ref: tg__{{ vnf_num }} + - member-vnf-index-ref: '{{ (vnf_num * 2) + 2 }}' + vnfd-connection-point-ref: xe0 + vnfd-id-ref: vnf__{{ vnf_num }} + - id: downlink_{{ vnf_num }} + name: vnf__{{ vnf_num }} to tg__{{ vnf_num }} link {{ (vnf_num * 2) + 2 }} + type: ELAN + vnfd-connection-point-ref: + - member-vnf-index-ref: '{{ (vnf_num * 2) + 1 }}' + vnfd-connection-point-ref: xe1 + vnfd-id-ref: vnf__{{ vnf_num }} + - member-vnf-index-ref: '{{ (vnf_num * 2) + 2 }}' + vnfd-connection-point-ref: xe1 + vnfd-id-ref: tg__{{ vnf_num }} +{% endfor %} diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml new file mode 100644 index 000000000..88581d2da --- /dev/null +++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml @@ -0,0 +1,113 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
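The scale-out test case template that follows reads ``num_vnfs`` and ``availability_zone`` from the task arguments. A small sketch for generating the corresponding ``--task-args`` JSON (the ``build_task_args`` helper is hypothetical; the values reproduce the documented example):

.. code-block:: python

    import json

    def build_task_args(num_vnfs, vnf_zone, tg_zone):
        """Map every vnf_N to vnf_zone and every tg_N to tg_zone."""
        azs = {}
        for n in range(num_vnfs):
            azs["vnf_%d" % n] = vnf_zone
            azs["tg_%d" % n] = tg_zone
        return json.dumps({"num_vnfs": num_vnfs, "availability_zone": azs})

    # 4 VNFs pinned to az_0 and 4 traffic generators pinned to az_1,
    # as in the yardstick invocation shown in the documentation above.
    print(build_task_args(4, "az_0", "az_1"))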
+{% set num_vnfs = num_vnfs or 1 %} +{% set availability_zone = availability_zone or {} %} +--- +schema: "yardstick:task:0.1" +scenarios: +- + type: NSPerf + traffic_profile: ../../traffic_profiles/prox_binsearch.yaml + topology: prox-tg-topology-scale-out.yaml + extra_args: + num_vnfs: {{ num_vnfs }} + + nodes: +{% for vnf_num in range(num_vnfs|int) %} + tg__{{ vnf_num }}: tg_{{ vnf_num }}.yardstick + vnf__{{ vnf_num }}: vnf_{{ vnf_num }}.yardstick +{% endfor %} + + options: +{% for vnf_num in range(num_vnfs|int) %} + vnf__{{ vnf_num }}: + prox_path: /opt/nsb_bin/prox + prox_config: "configs/handle_l2fwd_multiflow-2.cfg" + prox_args: + "-t": "" + + tg__{{ vnf_num }}: + prox_path: /opt/nsb_bin/prox + prox_config: "configs/gen_l2fwd_multiflow-2.cfg" + prox_args: + "-e": "" + "-t": "" +{% endfor %} + + runner: + type: Duration + # we kill after duration, independent of test duration, so set this high + duration: 300 + +context: + name: yardstick + image: yardstick-samplevnfs + user: ubuntu + flavor: + vcpus: 8 + ram: 20480 + disk: 10 + extra_specs: + hw:cpu_sockets: 1 + hw:cpu_cores: 8 + hw:cpu_threads: 1 + placement_groups: + pgrp1: + policy: "availability" + + servers: +{% for vnf_num in range(num_vnfs|int) %} + vnf_{{ vnf_num }}: + floating_ip: true + placement: "pgrp1" + {% if 'vnf_%s'|format(vnf_num) in availability_zone %} + availability_zone: "{{ availability_zone['vnf_%s'|format(vnf_num)] }}" + {% endif %} + network_ports: + mgmt: + - mgmt + uplink_{{ vnf_num }}: + - xe0 + downlink_{{ vnf_num }}: + - xe1 + tg_{{ vnf_num }}: + floating_ip: true + placement: "pgrp1" + {% if 'tg_%s'|format(vnf_num) in availability_zone %} + availability_zone: "{{ availability_zone['tg_%s'|format(vnf_num)] }}" + {% endif %} + network_ports: + mgmt: + - mgmt + uplink_{{ vnf_num }}: + - xe0 + downlink_{{ vnf_num }}: + - xe1 +{% endfor %} + + networks: + mgmt: + cidr: '10.0.1.0/24' +{% for vnf_num in range(num_vnfs|int) %} + uplink_{{ vnf_num }}: + cidr: '10.0.{{ (vnf_num * 2) + 2 }}.0/24' + gateway_ip: 'null' + port_security_enabled: False + enable_dhcp: 'false' + downlink_{{ vnf_num }}: + cidr: '10.0.{{ (vnf_num * 2) + 3 }}.0/24' + gateway_ip: 'null' + port_security_enabled: False + enable_dhcp: 'false' +{% endfor %} diff --git a/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_out.yaml b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_out.yaml new file mode 100644 index 000000000..f1408931f --- /dev/null +++ b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_out.yaml @@ -0,0 +1,114 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
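In the heat context above, an ``availability_zone`` line is only rendered for servers whose key appears in the ``availability_zone`` task argument. A condensed sketch of that conditional with plain Jinja2 (fragment shortened for illustration):

.. code-block:: python

    from jinja2 import Template

    # Condensed fragment of the server definition above: the availability_zone
    # entry is emitted only when 'vnf_<n>' is present in the task-args mapping.
    SERVER_TPL = Template("""
    vnf_{{ vnf_num }}:
      floating_ip: true
      {% if 'vnf_%s'|format(vnf_num) in availability_zone %}
      availability_zone: "{{ availability_zone['vnf_%s'|format(vnf_num)] }}"
      {% endif %}
    """)

    print(SERVER_TPL.render(vnf_num=0, availability_zone={"vnf_0": "az_0"}))
    # vnf_1 gets no availability_zone line and falls back to the scheduler default.
    print(SERVER_TPL.render(vnf_num=1, availability_zone={"vnf_0": "az_0"}))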
+{% set num_vnfs = num_vnfs or 1 %} +{% set availability_zone = availability_zone or {} %} +--- +schema: yardstick:task:0.1 +scenarios: +- type: NSPerf + traffic_profile: ../../traffic_profiles/ipv4_throughput_scale_out.yaml + topology: vfw_tg_topology_scale_out.yaml + extra_args: + num_vnfs: {{ num_vnfs }} + nodes: +{% for vnf_num in range(num_vnfs|int) %} + tg__{{ vnf_num }}: tg_{{ vnf_num }}.yardstick + vnf__{{ vnf_num }}: vnf_{{ vnf_num }}.yardstick +{% endfor %} + options: + framesize: + uplink: {64B: 100} + downlink: {64B: 100} + flow: + src_ip: +{% for vnf_num in range(num_vnfs|int) %} + - {'tg__{{ vnf_num }}': 'xe0'} +{% endfor %} + dst_ip: +{% for vnf_num in range(num_vnfs|int) %} + - {'tg__{{ vnf_num }}': 'xe1'} +{% endfor %} + count: 1 + traffic_type: 4 + rfc2544: + allowed_drop_rate: 0.0001 - 0.0001 +{% for vnf_num in range(num_vnfs|int) %} + vnf__{{ vnf_num }}: + rules: acl_1rule.yaml + vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1} +{% endfor %} + runner: + type: Iteration + iterations: 10 + interval: 35 +context: + # put node context first, so we don't HEAT deploy if node has errors + name: yardstick + image: yardstick-samplevnfs + flavor: + vcpus: 10 + ram: 20480 + disk: 6 + extra_specs: + hw:cpu_sockets: 1 + hw:cpu_cores: 10 + hw:cpu_threads: 1 + user: ubuntu + placement_groups: + pgrp1: + policy: "availability" + servers: +{% for vnf_num in range(num_vnfs|int) %} + vnf_{{ vnf_num }}: + floating_ip: true + placement: "pgrp1" + {% if 'vnf_%s'|format(vnf_num) in availability_zone %} + availability_zone: "{{ availability_zone['vnf_%s'|format(vnf_num)] }}" + {% endif %} + network_ports: + mgmt: + - mgmt + uplink_{{ vnf_num }}: + - xe0 + downlink_{{ vnf_num }}: + - xe1 + tg_{{ vnf_num }}: + floating_ip: true + placement: "pgrp1" + {% if 'tg_%s'|format(vnf_num) in availability_zone %} + availability_zone: "{{ availability_zone['tg_%s'|format(vnf_num)] }}" + {% endif %} + network_ports: + mgmt: + - mgmt + uplink_{{ vnf_num }}: + - xe0 + downlink_{{ vnf_num }}: + - xe1 +{% endfor %} + networks: + mgmt: + cidr: '10.0.1.0/24' +{% for vnf_num in range(num_vnfs|int) %} + uplink_{{ vnf_num }}: + cidr: '10.0.{{ (vnf_num * 2) + 2 }}.0/24' + gateway_ip: 'null' + port_security_enabled: False + enable_dhcp: 'false' + downlink_{{ vnf_num }}: + cidr: '10.0.{{ (vnf_num * 2) + 3 }}.0/24' + gateway_ip: 'null' + port_security_enabled: False + enable_dhcp: 'false' +{% endfor %} diff --git a/samples/vnf_samples/nsut/vfw/vfw_tg_topology_scale_out.yaml b/samples/vnf_samples/nsut/vfw/vfw_tg_topology_scale_out.yaml new file mode 100644 index 000000000..8bd01b7f2 --- /dev/null +++ b/samples/vnf_samples/nsut/vfw/vfw_tg_topology_scale_out.yaml @@ -0,0 +1,53 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
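Both topology templates in this change derive the ``member-vnf-index`` values from the VNF ordinal: the traffic generator of pair ``N`` gets ``2N + 1`` and the VNF gets ``2N + 2``. A short illustration of that numbering:

.. code-block:: python

    # member-vnf-index values used by the scale-out topology templates.
    num_vnfs = 4
    for vnf_num in range(num_vnfs):
        tg_index = (vnf_num * 2) + 1
        vnf_index = (vnf_num * 2) + 2
        print("tg__%d -> %d, vnf__%d -> %d"
              % (vnf_num, tg_index, vnf_num, vnf_index))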
+{% set num_vnfs = get(extra_args, 'num_vnfs', 1) %} +--- +nsd:nsd-catalog: + nsd: + - id: 3tg-topology + name: 3tg-topology + short-name: 3tg-topology + description: 3tg-topology + constituent-vnfd: +{% for vnf_num in range(num_vnfs|int) %} + - member-vnf-index: '{{ (vnf_num * 2) + 1 }}' + vnfd-id-ref: tg__{{ vnf_num }} + VNF model: ../../vnf_descriptors/tg_rfc2544_tpl.yaml + - member-vnf-index: '{{ (vnf_num * 2) + 2 }}' + vnfd-id-ref: vnf__{{ vnf_num }} + VNF model: ../../vnf_descriptors/vfw_vnf.yaml +{% endfor %} + vld: +{% for vnf_num in range(num_vnfs|int) %} + - id: uplink_{{ vnf_num }} + name: tg__{{ vnf_num }} to vnf__{{ vnf_num }} link {{ (vnf_num * 2) + 1 }} + type: ELAN + vnfd-connection-point-ref: + - member-vnf-index-ref: '{{ (vnf_num * 2) + 1 }}' + vnfd-connection-point-ref: xe0 + vnfd-id-ref: tg__{{ vnf_num }} + - member-vnf-index-ref: '{{ (vnf_num * 2) + 2 }}' + vnfd-connection-point-ref: xe0 + vnfd-id-ref: vnf__{{ vnf_num }} + - id: downlink_{{ vnf_num }} + name: vnf__{{ vnf_num }} to tg__{{ vnf_num }} link {{ (vnf_num * 2) + 2 }} + type: ELAN + vnfd-connection-point-ref: + - member-vnf-index-ref: '{{ (vnf_num * 2) + 2 }}' + vnfd-connection-point-ref: xe1 + vnfd-id-ref: vnf__{{ vnf_num }} + - member-vnf-index-ref: '{{ (vnf_num * 2) + 1 }}' + vnfd-connection-point-ref: xe1 + vnfd-id-ref: tg__{{ vnf_num }} +{% endfor %} diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_scale_out.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_scale_out.yaml new file mode 100644 index 000000000..71e9e817b --- /dev/null +++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_scale_out.yaml @@ -0,0 +1,102 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flow definition for ACL tests - 1K flows - ipv4 only +# +# the number of flows defines the widest range of parameters +# for example if srcip_range=1.0.0.1-1.0.0.255 and dst_ip_range=10.0.0.1-10.0.1.255 +# and it should define only 16 flows +# +#there is assumption that packets generated will have a random sequences of following addresses pairs +# in the packets +# 1. src=1.x.x.x(x.x.x =random from 1..255) dst=10.x.x.x (random from 1..512) +# 2. src=1.x.x.x(x.x.x =random from 1..255) dst=10.x.x.x (random from 1..512) +# ... +# 512. src=1.x.x.x(x.x.x =random from 1..255) dst=10.x.x.x (random from 1..512) +# +# not all combination should be filled +# Any other field with random range will be added to flow definition +# +# the example.yaml provides all possibilities for traffic generation +# +# the profile defines a public and private side to make limited traffic correlation +# between private and public side same way as it is made by IXIA solution. 
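The traffic profile below resolves nested values such as ``imix.uplink.64B`` through a template-global ``get()`` helper with a default. A rough sketch of the assumed lookup behaviour (not the actual Yardstick implementation, which may differ):

.. code-block:: python

    def get(obj, path, default=None):
        """Resolve a dotted key path against a nested dict, with a default."""
        keys = path.split('.')
        # The templates sometimes prefix the path with the object name itself,
        # e.g. get(imix, 'imix.uplink.64B', '0'); skip that leading component.
        if len(keys) > 1 and keys[0] not in obj:
            keys = keys[1:]
        for key in keys:
            if not isinstance(obj, dict) or key not in obj:
                return default
            obj = obj[key]
        return obj

    imix = {'uplink': {'64B': 100}}
    print(get(imix, 'imix.uplink.64B', '0'))    # 100
    print(get(imix, 'imix.uplink.128B', '0'))   # '0'
    print(get({'num_vnfs': 4}, 'num_vnfs', 1))  # 4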
+# +{% set num_vnfs = get(extra_args, 'num_vnfs', 1) %} +--- +schema: "nsb:traffic_profile:0.1" +name: rfc2544 +description: Traffic profile to run RFC2544 latency +traffic_profile: + traffic_type : RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput + frame_rate : 100 # pc of linerate + # that specifies a range (e.g. ipv4 address, port) +{% for vnf_num in range(num_vnfs|int) %} +uplink_{{ vnf_num }}: + ipv4: + id: {{ (vnf_num * 2) + 1 }} + outer_l2: + framesize: + 64B: "{{get(imix, 'imix.uplink.64B', '0') }}" + 128B: "{{get(imix, 'imix.uplink.128B', '0') }}" + 256B: "{{get(imix, 'imix.uplink.256B', '0') }}" + 373B: "{{get(imix, 'imix.uplink.373B', '0') }}" + 512B: "{{get(imix, 'imix.uplink.512B', '0') }}" + 570B: "{{get(imix, 'imix.uplink.570B', '0') }}" + 1024B: "{{get(imix, 'imix.uplink.1024B', '0') }}" + 1280B: "{{get(imix, 'imix.uplink.1280B', '0') }}" + 1400B: "{{get(imix, 'imix.uplink.1400B', '0') }}" + 1500B: "{{get(imix, 'imix.uplink.1500B', '0') }}" + 1518B: "{{get(imix, 'imix.uplink.1518B', '0') }}" + + outer_l3v4: + proto: "udp" + srcip4: "{{get(flow, 'flow.src_ip_{{ vnf_num }}', '10.0.2.1-10.0.2.255') }}" + dstip4: "{{get(flow, 'flow.dst_ip_{{ vnf_num }}', '10.0.3.1-10.0.3.255') }}" + count: "{{get(flow, 'flow.count', '1') }}" + ttl: 32 + dscp: 0 + outer_l4: + srcport: "{{get(flow, 'flow.src_port_{{ vnf_num }}', '1234-4321') }}" + dstport: "{{get(flow, 'flow.dst_port_{{ vnf_num }}', '2001-4001') }}" + count: "{{get(flow, 'flow.count', '1') }}" +downlink_{{ vnf_num }}: + ipv4: + id: {{ (vnf_num * 2) + 2 }} + outer_l2: + framesize: + 64B: "{{ get(imix, 'imix.downlink.64B', '0') }}" + 128B: "{{ get(imix, 'imix.downlink.128B', '0') }}" + 256B: "{{ get(imix, 'imix.downlink.256B', '0') }}" + 373b: "{{ get(imix, 'imix.downlink.373B', '0') }}" + 512B: "{{ get(imix, 'imix.downlink.512B', '0') }}" + 570B: "{{get(imix, 'imix.downlink.570B', '0') }}" + 1024B: "{{get(imix, 'imix.downlink.1024B', '0') }}" + 1280B: "{{get(imix, 'imix.downlink.1280B', '0') }}" + 1400B: "{{get(imix, 'imix.downlink.1400B', '0') }}" + 1500B: "{{get(imix, 'imix.downlink.1500B', '0') }}" + 1518B: "{{get(imix, 'imix.downlink.1518B', '0') }}" + + outer_l3v4: + proto: "udp" + srcip4: "{{get(flow, 'flow.dst_ip_{{ vnf_num }}', '10.0.3.1-10.0.3.255') }}" + dstip4: "{{get(flow, 'flow.src_ip_{{ vnf_num }}', '10.0.2.1-10.0.2.255') }}" + count: "{{get(flow, 'flow.count', '1') }}" + ttl: 32 + dscp: 0 + outer_l4: + srcport: "{{get(flow, 'flow.dst_port_{{ vnf_num }}', '1234-4321') }}" + dstport: "{{get(flow, 'flow.src_port_{{ vnf_num }}', '2001-4001') }}" + count: "{{get(flow, 'flow.count', '1') }}" +{% endfor %} diff --git a/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py b/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py index 367072e84..9d94e3d0b 100644 --- a/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py +++ b/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py @@ -17,9 +17,7 @@ import unittest import os -from yardstick.error import IncorrectConfig, SSHError -from yardstick.error import IncorrectNodeSetup -from yardstick.error import IncorrectSetup +from yardstick.common import exceptions from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkInterface from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkNode from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper @@ -142,12 +140,13 @@ class TestDpdkInterface(unittest.TestCase): def 
test_probe_missing_values_negative(self): mock_dpdk_node = mock.Mock() - mock_dpdk_node.netdevs.values.side_effect = IncorrectNodeSetup + mock_dpdk_node.netdevs.values.side_effect = ( + exceptions.IncorrectNodeSetup(error_msg='')) interface = {'local_mac': '0a:de:ad:be:ef:f5'} dpdk_intf = DpdkInterface(mock_dpdk_node, interface) - with self.assertRaises(IncorrectConfig): + with self.assertRaises(exceptions.IncorrectConfig): dpdk_intf.probe_missing_values() @@ -213,7 +212,7 @@ class TestDpdkNode(unittest.TestCase): def test_check(self): def update(): if not mock_force_rebind.called: - raise IncorrectConfig + raise exceptions.IncorrectConfig(error_msg='') interfaces[0]['virtual-interface'].update({ 'vpci': '0000:01:02.1', @@ -244,11 +243,11 @@ class TestDpdkNode(unittest.TestCase): mock_ssh_helper = mock.Mock() mock_ssh_helper.execute.return_value = 0, '', '' - mock_intf_type().check.side_effect = SSHError + mock_intf_type().check.side_effect = exceptions.SSHError dpdk_node = DpdkNode(NAME, self.INTERFACES, mock_ssh_helper) - with self.assertRaises(IncorrectSetup): + with self.assertRaises(exceptions.IncorrectSetup): dpdk_node.check() def test_probe_netdevs(self): diff --git a/tests/unit/network_services/nfvi/test_resource.py b/tests/unit/network_services/nfvi/test_resource.py index f5f7f0fe7..741f9a6cc 100644 --- a/tests/unit/network_services/nfvi/test_resource.py +++ b/tests/unit/network_services/nfvi/test_resource.py @@ -19,6 +19,8 @@ import unittest from yardstick.network_services.nfvi.resource import ResourceProfile from yardstick.network_services.nfvi import resource, collectd +from yardstick.common.exceptions import ResourceCommandError +from yardstick.common.exceptions import SSHError class TestResourceProfile(unittest.TestCase): @@ -128,8 +130,31 @@ class TestResourceProfile(unittest.TestCase): self.assertEqual(val, ('error', 'Invalid', '', '')) def test__start_collectd(self): - self.assertIsNone( - self.resource_profile._start_collectd(self.ssh_mock, "/opt/nsb_bin")) + ssh_mock = mock.Mock() + ssh_mock.execute = mock.Mock(return_value=(0, "", "")) + self.assertIsNone(self.resource_profile._start_collectd(ssh_mock, + "/opt/nsb_bin")) + + ssh_mock.execute = mock.Mock(side_effect=SSHError) + with self.assertRaises(SSHError): + self.resource_profile._start_collectd(ssh_mock, "/opt/nsb_bin") + + ssh_mock.execute = mock.Mock(return_value=(1, "", "")) + self.assertIsNone(self.resource_profile._start_collectd(ssh_mock, + "/opt/nsb_bin")) + + def test__start_rabbitmq(self): + ssh_mock = mock.Mock() + ssh_mock.execute = mock.Mock(return_value=(0, "RabbitMQ", "")) + self.assertIsNone(self.resource_profile._start_rabbitmq(ssh_mock)) + + ssh_mock.execute = mock.Mock(return_value=(0, "", "")) + with self.assertRaises(ResourceCommandError): + self.resource_profile._start_rabbitmq(ssh_mock) + + ssh_mock.execute = mock.Mock(return_value=(1, "", "")) + with self.assertRaises(ResourceCommandError): + self.resource_profile._start_rabbitmq(ssh_mock) def test__prepare_collectd_conf(self): self.assertIsNone( @@ -154,11 +179,12 @@ class TestResourceProfile(unittest.TestCase): def test_initiate_systemagent(self): self.resource_profile._start_collectd = mock.Mock() + self.resource_profile._start_rabbitmq = mock.Mock() self.assertIsNone( self.resource_profile.initiate_systemagent("/opt/nsb_bin")) def test_initiate_systemagent_raise(self): - self.resource_profile._start_collectd = mock.Mock(side_effect=RuntimeError) + self.resource_profile._start_rabbitmq = mock.Mock(side_effect=RuntimeError) with 
self.assertRaises(RuntimeError): self.resource_profile.initiate_systemagent("/opt/nsb_bin") diff --git a/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py index 2971b55eb..efde669d2 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py @@ -29,6 +29,7 @@ stl_patch.start() if stl_patch: from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxVnf from yardstick.network_services.nfvi.resource import ResourceProfile + from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxSetupEnvSetupEnvHelper TEST_FILE_YAML = 'nsb_test_case.yaml' @@ -345,3 +346,27 @@ class TestAclApproxVnf(unittest.TestCase): acl_approx_vnf.dpdk_devbind = "dpdk-devbind.py" acl_approx_vnf._resource_collect_stop = mock.Mock() self.assertIsNone(acl_approx_vnf.terminate()) + + +class TestAclApproxSetupEnvSetupEnvHelper(unittest.TestCase): + + @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open') + @mock.patch.object(utils, 'find_relative_file') + @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig') + @mock.patch.object(utils, 'open_relative_file') + def test_build_config(self, *args): + vnfd_helper = mock.Mock() + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.vnf_cfg = {'lb_config': 'HW'} + scenario_helper.all_options = {} + + acl_approx_setup_helper = AclApproxSetupEnvSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + acl_approx_setup_helper.ssh_helper.provision_tool = mock.Mock(return_value='tool_path') + acl_approx_setup_helper.ssh_helper.all_ports = mock.Mock() + acl_approx_setup_helper.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1]) + expected = 'sudo tool_path -p 0x3 -f /tmp/acl_config -s /tmp/acl_script --hwlb 3' + self.assertEqual(acl_approx_setup_helper.build_config(), expected) diff --git a/tests/unit/network_services/vnf_generic/vnf/test_base.py b/tests/unit/network_services/vnf_generic/vnf/test_base.py index 664373f8f..9ef6473f0 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_base.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_base.py @@ -215,9 +215,11 @@ class TestGenericVNF(unittest.TestCase): with self.assertRaises(TypeError) as exc: # pylint: disable=abstract-class-instantiated base.GenericVNF('vnf1', VNFD['vnfd:vnfd-catalog']['vnfd'][0]) - msg = ("Can't instantiate abstract class GenericVNF with abstract " - "methods collect_kpi, instantiate, scale, terminate, " - "wait_for_instantiate") + + msg = ("Can't instantiate abstract class GenericVNF with abstract methods " + "collect_kpi, instantiate, scale, start_collect, " + "stop_collect, terminate, wait_for_instantiate") + self.assertEqual(msg, str(exc.exception)) diff --git a/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py index edaa0ad7e..b7731b649 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py @@ -20,7 +20,7 @@ import mock from tests.unit import STL_MOCKS from tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh - +from yardstick.common import utils STLClient = mock.MagicMock() stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) @@ -79,6 +79,27 @@ link 1 up with self.assertRaises(NotImplementedError): helper.scale() + 
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open') + @mock.patch.object(utils, 'find_relative_file') + @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig') + @mock.patch.object(utils, 'open_relative_file') + def test_build_config(self, *args): + vnfd_helper = mock.Mock() + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.vnf_cfg = {'lb_config': 'HW'} + scenario_helper.all_options = {} + + cgnat_approx_setup_helper = CgnaptApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + cgnat_approx_setup_helper.ssh_helper.provision_tool = mock.Mock(return_value='tool_path') + cgnat_approx_setup_helper.ssh_helper.all_ports = mock.Mock() + cgnat_approx_setup_helper.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1]) + expected = 'sudo tool_path -p 0x3 -f /tmp/cgnapt_config -s /tmp/cgnapt_script --hwlb 3' + self.assertEqual(cgnat_approx_setup_helper.build_config(), expected) + @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Process") class TestCgnaptApproxVnf(unittest.TestCase): diff --git a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py index eb59c2837..ff71bed9d 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py @@ -607,12 +607,15 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase): dpdk_setup_helper.CFG_SCRIPT = 'script' dpdk_setup_helper.pipeline_kwargs = {} dpdk_setup_helper.all_ports = [0, 1, 2] + dpdk_setup_helper.scenario_helper.vnf_cfg = {'lb_config': 'HW', + 'worker_threads': 1} expected = { 'cfg_file': 'config', 'script': 'script', 'port_mask_hex': '0x3', 'tool_path': 'tool_path', + 'hwlb': ' --hwlb 1', } dpdk_setup_helper._build_pipeline_kwargs() self.assertDictEqual(dpdk_setup_helper.pipeline_kwargs, expected) @@ -1714,6 +1717,64 @@ class TestSampleVnf(unittest.TestCase): self.assertIsNone(sample_vnf.instantiate(scenario_cfg, {})) self.assertEqual(sample_vnf.nfvi_context, context2) + def test__update_collectd_options(self): + scenario_cfg = {'options': + {'collectd': + {'interval': 3, + 'plugins': + {'plugin3': {'param': 3}}}, + 'vnf__0': + {'collectd': + {'interval': 2, + 'plugins': + {'plugin3': {'param': 2}, + 'plugin2': {'param': 2}}}}}} + context_cfg = {'nodes': + {'vnf__0': + {'collectd': + {'interval': 1, + 'plugins': + {'plugin3': {'param': 1}, + 'plugin2': {'param': 1}, + 'plugin1': {'param': 1}}}}}} + expected = {'interval': 1, + 'plugins': + {'plugin3': {'param': 1}, + 'plugin2': {'param': 1}, + 'plugin1': {'param': 1}}} + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + sample_vnf = SampleVNF('vnf__0', vnfd) + sample_vnf._update_collectd_options(scenario_cfg, context_cfg) + self.assertEqual(sample_vnf.setup_helper.collectd_options, expected) + + def test__update_options(self): + options1 = {'interval': 1, + 'param1': 'value1', + 'plugins': + {'plugin3': {'param': 3}, + 'plugin2': {'param': 1}, + 'plugin1': {'param': 1}}} + options2 = {'interval': 2, + 'param2': 'value2', + 'plugins': + {'plugin4': {'param': 4}, + 'plugin2': {'param': 2}, + 'plugin1': {'param': 2}}} + expected = {'interval': 1, + 'param1': 'value1', + 'param2': 'value2', + 'plugins': + {'plugin4': {'param': 4}, + 'plugin3': {'param': 3}, + 'plugin2': {'param': 1}, + 'plugin1': {'param': 1}}} + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + sample_vnf = SampleVNF('vnf1', vnfd) + 
sample_vnf._update_options(options2, options1) + self.assertEqual(options2, expected) + @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") @mock.patch("yardstick.ssh.SSH") def test_wait_for_instantiate_empty_queue(self, ssh, *args): diff --git a/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py index 48fc87ed4..aaad66381 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py @@ -30,6 +30,7 @@ stl_patch.start() if stl_patch: from yardstick.network_services.vnf_generic.vnf.vfw_vnf import FWApproxVnf from yardstick.network_services.nfvi.resource import ResourceProfile + from yardstick.network_services.vnf_generic.vnf.vfw_vnf import FWApproxSetupEnvHelper TEST_FILE_YAML = 'nsb_test_case.yaml' @@ -349,3 +350,25 @@ pipeline> 'rules': ""}} self.scenario_cfg.update({"nodes": {"vnf__1": ""}}) self.assertIsNone(vfw_approx_vnf.instantiate(self.scenario_cfg, self.context_cfg)) + + +class TestFWApproxSetupEnvHelper(unittest.TestCase): + + @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open') + @mock.patch.object(utils, 'find_relative_file') + @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig') + @mock.patch.object(utils, 'open_relative_file') + def test_build_config(self, *args): + vnfd_helper = mock.Mock() + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.vnf_cfg = {'lb_config': 'HW'} + scenario_helper.all_options = {} + + vfw_approx_setup_helper = FWApproxSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) + + vfw_approx_setup_helper.ssh_helper.provision_tool = mock.Mock(return_value='tool_path') + vfw_approx_setup_helper.ssh_helper.all_ports = mock.Mock() + vfw_approx_setup_helper.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1]) + expected = 'sudo tool_path -p 0x3 -f /tmp/vfw_config -s /tmp/vfw_script --hwlb 3' + self.assertEqual(vfw_approx_setup_helper.build_config(), expected) diff --git a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py index 8c45d973e..9857e95b6 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py @@ -18,7 +18,7 @@ import os import time import mock -import six.moves.configparser as configparser +from six.moves import configparser import unittest from tests.unit import STL_MOCKS @@ -663,7 +663,14 @@ class TestVpeApproxVnf(unittest.TestCase): vpe_approx_vnf.ssh_helper.bin_path = mock.Mock() vpe_approx_vnf.ssh_helper.upload_config_file = mock.MagicMock() self.assertIsNone(vpe_approx_vnf._build_vnf_ports()) - self.assertIsNotNone(vpe_approx_vnf.build_config()) + + vpe_approx_vnf.ssh_helper.provision_tool = mock.Mock(return_value='tool_path') + vpe_approx_vnf.ssh_helper.all_ports = mock.Mock() + vpe_approx_vnf.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1]) + vpe_approx_vnf.scenario_helper.vnf_cfg = {'lb_config': 'HW'} + + expected = 'sudo tool_path -p 0x3 -f /tmp/vpe_config -s /tmp/vpe_script --hwlb 3' + self.assertEqual(vpe_approx_vnf.build_config(), expected) @mock.patch(SSH_HELPER) def test_wait_for_instantiate(self, ssh): diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py index ae8319e37..0707c1c5b 100644 --- a/yardstick/benchmark/contexts/base.py +++ b/yardstick/benchmark/contexts/base.py @@ -42,20 +42,12 @@ 
class Context(object): list = [] SHORT_TASK_ID_LEN = 8 - @staticmethod - def split_name(name, sep='.'): - try: - name_iter = iter(name.split(sep)) - except AttributeError: - # name is not a string - return None, None - return next(name_iter), next(name_iter, None) - - def __init__(self): + def __init__(self, host_name_separator='.'): Context.list.append(self) self._flags = Flags() self._name = None self._task_id = None + self._host_name_separator = host_name_separator def init(self, attrs): """Initiate context""" @@ -65,6 +57,12 @@ class Context(object): self._name_task_id = '{}-{}'.format( self._name, self._task_id[:self.SHORT_TASK_ID_LEN]) + def split_host_name(self, name): + if (isinstance(name, six.string_types) + and self._host_name_separator in name): + return tuple(name.split(self._host_name_separator, 1)) + return None, None + @property def name(self): if self._flags.no_setup or self._flags.no_teardown: @@ -76,6 +74,10 @@ class Context(object): def assigned_name(self): return self._name + @property + def host_name_separator(self): + return self._host_name_separator + @staticmethod def get_cls(context_type): """Return class of specified type.""" diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py index 0d1dfb86f..0640d2c78 100644 --- a/yardstick/benchmark/contexts/heat.py +++ b/yardstick/benchmark/contexts/heat.py @@ -466,7 +466,7 @@ class HeatContext(Context): with attribute name mapping when using external heat templates """ if isinstance(attr_name, collections.Mapping): - node_name, cname = self.split_name(attr_name['name']) + node_name, cname = self.split_host_name(attr_name['name']) if cname is None or cname != self.name: return None diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py index 4bea991ea..82435d40c 100644 --- a/yardstick/benchmark/contexts/kubernetes.py +++ b/yardstick/benchmark/contexts/kubernetes.py @@ -33,8 +33,7 @@ class KubernetesContext(Context): self.key_path = '' self.public_key_path = '' self.template = None - - super(KubernetesContext, self).__init__() + super(KubernetesContext, self).__init__(host_name_separator='-') def init(self, attrs): super(KubernetesContext, self).init(attrs) diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py index fa619a9aa..93888ef41 100644 --- a/yardstick/benchmark/contexts/node.py +++ b/yardstick/benchmark/contexts/node.py @@ -139,7 +139,7 @@ class NodeContext(Context): """lookup server info by name from context attr_name: a name for a server listed in nodes config file """ - node_name, name = self.split_name(attr_name) + node_name, name = self.split_host_name(attr_name) if name is None or self.name != name: return None diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py index 30b685eec..ccb0f8f99 100644 --- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py +++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py @@ -305,7 +305,7 @@ class OvsDpdkContext(Context): Keyword arguments: attr_name -- A name for a server listed in nodes config file """ - node_name, name = self.split_name(attr_name) + node_name, name = self.split_host_name(attr_name) if name is None or self.name != name: return None diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py index 5db419e6a..c5438b3cf 100644 --- a/yardstick/benchmark/contexts/standalone/sriov.py +++ 
b/yardstick/benchmark/contexts/standalone/sriov.py @@ -115,7 +115,7 @@ class SriovContext(Context): Keyword arguments: attr_name -- A name for a server listed in nodes config file """ - node_name, name = self.split_name(attr_name) + node_name, name = self.split_host_name(attr_name) if name is None or self.name != name: return None @@ -197,10 +197,10 @@ class SriovContext(Context): slot = index + idx + 10 vf['vpci'] = \ "{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function) - model.Libvirt.add_sriov_interfaces( - vf['vpci'], vf['vf_pci']['vf_pci'], vf['mac'], str(cfg)) self.connection.execute("ifconfig %s up" % vf['interface']) self.connection.execute(vf_spoofchk.format(vf['interface'])) + return model.Libvirt.add_sriov_interfaces( + vf['vpci'], vf['vf_pci']['vf_pci'], vf['mac'], str(cfg)) def setup_sriov_context(self): nodes = [] @@ -223,7 +223,7 @@ class SriovContext(Context): network_ports = collections.OrderedDict( {k: v for k, v in vnf["network_ports"].items() if k != 'mgmt'}) for idx, vfs in enumerate(network_ports.values()): - self._enable_interfaces(index, idx, vfs, cfg) + xml_str = self._enable_interfaces(index, idx, vfs, xml_str) # copy xml to target... model.Libvirt.write_file(cfg, xml_str) diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py index 697cc007f..f050e8d0f 100644 --- a/yardstick/benchmark/core/task.py +++ b/yardstick/benchmark/core/task.py @@ -619,27 +619,22 @@ class TaskParser(object): # pragma: no cover nodes: tg__0: tg_0.yardstick vnf__0: vnf_0.yardstick + + NOTE: in Kubernetes context, the separator character between the server + name and the context name is "-": + scenario: + host: host-k8s + target: target-k8s """ def qualified_name(name): - try: - # for openstack - node_name, context_name = name.split('.') - sep = '.' - except ValueError: - # for kubernetes, some kubernetes resources don't support - # name format like 'xxx.xxx', so we use '-' instead - # need unified later - node_name, context_name = name.split('-') - sep = '-' - - try: - ctx = next((context for context in contexts - if context.assigned_name == context_name)) - except StopIteration: - raise y_exc.ScenarioConfigContextNameNotFound( - context_name=context_name) - - return '{}{}{}'.format(node_name, sep, ctx.name) + for context in contexts: + host_name, ctx_name = context.split_host_name(name) + if context.assigned_name == ctx_name: + return '{}{}{}'.format(host_name, + context.host_name_separator, + context.name) + + raise y_exc.ScenarioConfigContextNameNotFound(host_name=name) if 'host' in scenario: scenario['host'] = qualified_name(scenario['host']) diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py index be2fa3f3b..1c3ea1f8d 100644 --- a/yardstick/benchmark/scenarios/networking/vnf_generic.py +++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py @@ -13,20 +13,19 @@ # limitations under the License. 
import copy -import logging -import time - import ipaddress from itertools import chain +import logging import os import sys +import time import six import yaml from yardstick.benchmark.scenarios import base as scenario_base -from yardstick.error import IncorrectConfig from yardstick.common.constants import LOG_DIR +from yardstick.common import exceptions from yardstick.common.process import terminate_children from yardstick.common import utils from yardstick.network_services.collector.subscriber import Collector @@ -190,8 +189,9 @@ class NetworkServiceTestCase(scenario_base.Scenario): try: node0_data, node1_data = vld["vnfd-connection-point-ref"] except (ValueError, TypeError): - raise IncorrectConfig("Topology file corrupted, " - "wrong endpoint count for connection") + raise exceptions.IncorrectConfig( + error_msg='Topology file corrupted, wrong endpoint count ' + 'for connection') node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"]) node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"]) @@ -237,15 +237,17 @@ class NetworkServiceTestCase(scenario_base.Scenario): except KeyError: LOG.exception("") - raise IncorrectConfig("Required interface not found, " - "topology file corrupted") + raise exceptions.IncorrectConfig( + error_msg='Required interface not found, topology file ' + 'corrupted') for vld in self.topology['vld']: try: node0_data, node1_data = vld["vnfd-connection-point-ref"] except (ValueError, TypeError): - raise IncorrectConfig("Topology file corrupted, " - "wrong endpoint count for connection") + raise exceptions.IncorrectConfig( + error_msg='Topology file corrupted, wrong endpoint count ' + 'for connection') node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"]) node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"]) @@ -330,8 +332,9 @@ class NetworkServiceTestCase(scenario_base.Scenario): except StopIteration: pass - raise IncorrectConfig("No implementation for %s found in %s" % - (expected_name, classes_found)) + message = ('No implementation for %s found in %s' + % (expected_name, classes_found)) + raise exceptions.IncorrectConfig(error_msg=message) @staticmethod def create_interfaces_from_node(vnfd, node): @@ -441,7 +444,7 @@ class NetworkServiceTestCase(scenario_base.Scenario): traffic_gen.listen_traffic(self.traffic_profile) # register collector with yardstick for KPI collection. - self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile) + self.collector = Collector(self.vnfs) self.collector.start() # Start the actual traffic diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py index fb4d33a59..f5e923f4d 100644 --- a/yardstick/common/exceptions.py +++ b/yardstick/common/exceptions.py @@ -21,6 +21,16 @@ class ProcessExecutionError(RuntimeError): self.returncode = returncode +class ErrorClass(object): + + def __init__(self, *args, **kwargs): + if 'test' not in kwargs: + raise RuntimeError + + def __getattr__(self, item): + raise AttributeError + + class YardstickException(Exception): """Base Yardstick Exception. 
@@ -54,6 +64,10 @@ class YardstickException(Exception): return False +class ResourceCommandError(YardstickException): + message = 'Command: "%(command)s" Failed, stderr: "%(stderr)s"' + + class FunctionNotImplemented(YardstickException): message = ('The function "%(function_name)s" is not implemented in ' '"%(class_name)" class.') @@ -112,8 +126,28 @@ class LibvirtCreateError(YardstickException): message = 'Error creating the virtual machine. Error: %(error)s.' +class SSHError(YardstickException): + message = '%(error_msg)s' + + +class SSHTimeout(SSHError): + pass + + +class IncorrectConfig(YardstickException): + message = '%(error_msg)s' + + +class IncorrectSetup(YardstickException): + message = '%(error_msg)s' + + +class IncorrectNodeSetup(IncorrectSetup): + pass + + class ScenarioConfigContextNameNotFound(YardstickException): - message = 'Context name "%(context_name)s" not found' + message = 'Context for host name "%(host_name)s" not found' class StackCreationInterrupt(YardstickException): diff --git a/yardstick/error.py b/yardstick/error.py deleted file mode 100644 index 9b84de1af..000000000 --- a/yardstick/error.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2016-2017 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class SSHError(Exception): - """Class handles ssh connection error exception""" - pass - - -class SSHTimeout(SSHError): - """Class handles ssh connection timeout exception""" - pass - - -class IncorrectConfig(Exception): - """Class handles incorrect configuration during setup""" - pass - - -class IncorrectSetup(Exception): - """Class handles incorrect setup during setup""" - pass - - -class IncorrectNodeSetup(IncorrectSetup): - """Class handles incorrect setup during setup""" - pass - - -class ErrorClass(object): - - def __init__(self, *args, **kwargs): - if 'test' not in kwargs: - raise RuntimeError - - def __getattr__(self, item): - raise AttributeError diff --git a/yardstick/network_services/collector/subscriber.py b/yardstick/network_services/collector/subscriber.py index 7e18302eb..322b3f5a2 100644 --- a/yardstick/network_services/collector/subscriber.py +++ b/yardstick/network_services/collector/subscriber.py @@ -14,42 +14,29 @@ """This module implements stub for publishing results in yardstick format.""" import logging -from yardstick.network_services.nfvi.resource import ResourceProfile -from yardstick.network_services.utils import get_nsb_option - LOG = logging.getLogger(__name__) class Collector(object): """Class that handles dictionary of results in yardstick-plot format.""" - def __init__(self, vnfs, nodes, traffic_profile, timeout=3600): + def __init__(self, vnfs): super(Collector, self).__init__() - self.traffic_profile = traffic_profile self.vnfs = vnfs - self.nodes = nodes - self.timeout = timeout - self.bin_path = get_nsb_option('bin_path', '') - self.resource_profiles = {node_name: ResourceProfile.make_from_node(node, self.timeout) - for node_name, node in self.nodes.items() - if node.get("collectd")} def start(self): - """Nothing 
to do, yet""" - for resource in self.resource_profiles.values(): - resource.initiate_systemagent(self.bin_path) - resource.start() - resource.amqp_process_for_nfvi_kpi() + for vnf in self.vnfs: + vnf.start_collect() def stop(self): - """Nothing to do, yet""" - for resource in self.resource_profiles.values(): - resource.stop() + for vnf in self.vnfs: + vnf.stop_collect() def get_kpi(self): """Returns dictionary of results in yardstick-plot format - :return: + :return: (dict) dictionary of kpis collected from the VNFs; + the keys are the names of the VNFs. """ results = {} for vnf in self.vnfs: @@ -58,17 +45,4 @@ class Collector(object): LOG.debug("collect KPI for %s", vnf.name) results[vnf.name] = vnf.collect_kpi() - for node_name, resource in self.resource_profiles.items(): - # Result example: - # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }} - LOG.debug("collect KPI for %s", node_name) - if resource.check_if_system_agent_running("collectd")[0] != 0: - continue - - try: - results[node_name] = {"core": resource.amqp_collect_nfvi_kpi()} - LOG.debug("%s collect KPIs %s", node_name, results[node_name]['core']) - # NOTE(elfoley): catch a more specific error - except Exception as exc: # pylint: disable=broad-except - LOG.exception(exc) return results diff --git a/yardstick/network_services/helpers/dpdkbindnic_helper.py b/yardstick/network_services/helpers/dpdkbindnic_helper.py index 05b822c2e..1c74355ef 100644 --- a/yardstick/network_services/helpers/dpdkbindnic_helper.py +++ b/yardstick/network_services/helpers/dpdkbindnic_helper.py @@ -18,12 +18,9 @@ import re from collections import defaultdict from itertools import chain +from yardstick.common import exceptions from yardstick.common.utils import validate_non_string_sequence -from yardstick.error import IncorrectConfig -from yardstick.error import IncorrectSetup -from yardstick.error import IncorrectNodeSetup -from yardstick.error import SSHTimeout -from yardstick.error import SSHError + NETWORK_KERNEL = 'network_kernel' NETWORK_DPDK = 'network_dpdk' @@ -51,7 +48,7 @@ class DpdkInterface(object): try: assert self.local_mac except (AssertionError, KeyError): - raise IncorrectConfig + raise exceptions.IncorrectConfig(error_msg='') @property def local_mac(self): @@ -98,10 +95,12 @@ class DpdkInterface(object): # if we don't find all the keys then don't update pass - except (IncorrectNodeSetup, SSHError, SSHTimeout): - raise IncorrectConfig( - "Unable to probe missing interface fields '%s', on node %s " - "SSH Error" % (', '.join(self.missing_fields), self.dpdk_node.node_key)) + except (exceptions.IncorrectNodeSetup, exceptions.SSHError, + exceptions.SSHTimeout): + message = ('Unable to probe missing interface fields "%s", on ' + 'node %s SSH Error' % (', '.join(self.missing_fields), + self.dpdk_node.node_key)) + raise exceptions.IncorrectConfig(error_msg=message) class DpdkNode(object): @@ -118,11 +117,12 @@ class DpdkNode(object): try: self.dpdk_interfaces = {intf['name']: DpdkInterface(self, intf['virtual-interface']) for intf in self.interfaces} - except IncorrectConfig: + except exceptions.IncorrectConfig: template = "MAC address is required for all interfaces, missing on: {}" errors = (intf['name'] for intf in self.interfaces if 'local_mac' not in intf['virtual-interface']) - raise IncorrectSetup(template.format(", ".join(errors))) + raise exceptions.IncorrectSetup( + error_msg=template.format(", ".join(errors))) @property def dpdk_helper(self): @@ -176,7 +176,7 @@ class DpdkNode(object): self._probe_netdevs() try: 
self._probe_missing_values() - except IncorrectConfig: + except exceptions.IncorrectConfig: # ignore for now pass @@ -193,7 +193,7 @@ class DpdkNode(object): missing_fields) errors = "\n".join(errors) if errors: - raise IncorrectSetup(errors) + raise exceptions.IncorrectSetup(error_msg=errors) finally: self._dpdk_helper = None diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py index dc5c46a86..0c0bf223a 100644 --- a/yardstick/network_services/nfvi/resource.py +++ b/yardstick/network_services/nfvi/resource.py @@ -27,6 +27,7 @@ from oslo_config import cfg from oslo_utils.encodeutils import safe_decode from yardstick import ssh +from yardstick.common.exceptions import ResourceCommandError from yardstick.common.task_template import finalize_for_yaml from yardstick.common.utils import validate_non_string_sequence from yardstick.network_services.nfvi.collectd import AmqpConsumer @@ -249,45 +250,46 @@ class ResourceProfile(object): if status != 0: LOG.error("cannot find OVS socket %s", socket_path) + def _start_rabbitmq(self, connection): + # Reset amqp queue + LOG.debug("reset and setup amqp to collect data from collectd") + # ensure collectd.conf.d exists to avoid error/warning + cmd_list = ["sudo mkdir -p /etc/collectd/collectd.conf.d", + "sudo service rabbitmq-server restart", + "sudo rabbitmqctl stop_app", + "sudo rabbitmqctl reset", + "sudo rabbitmqctl start_app", + "sudo rabbitmqctl add_user admin admin", + "sudo rabbitmqctl authenticate_user admin admin", + "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'" + ] + for cmd in cmd_list: + exit_status, stdout, stderr = connection.execute(cmd) + if exit_status != 0: + raise ResourceCommandError(command=cmd, stderr=stderr) + + # check stdout for "sudo rabbitmqctl status" command + cmd = "sudo rabbitmqctl status" + _, stdout, stderr = connection.execute(cmd) + if not re.search("RabbitMQ", stdout): + LOG.error("rabbitmqctl status don't have RabbitMQ in running apps") + raise ResourceCommandError(command=cmd, stderr=stderr) + def _start_collectd(self, connection, bin_path): LOG.debug("Starting collectd to collect NFVi stats") - connection.execute('sudo pkill -x -9 collectd') collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd") config_file_path = os.path.join(bin_path, "collectd", "etc") + self._prepare_collectd_conf(config_file_path) + + connection.execute('sudo pkill -x -9 collectd') exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0] if exit_status != 0: LOG.warning("%s is not present disabling", collectd_path) - # disable auto-provisioning because it requires Internet access - # collectd_installer = os.path.join(bin_path, "collectd.sh") - # provision_tool(connection, collectd) - # http_proxy = os.environ.get('http_proxy', '') - # https_proxy = os.environ.get('https_proxy', '') - # connection.execute("sudo %s '%s' '%s'" % ( - # collectd_installer, http_proxy, https_proxy)) return if "ovs_stats" in self.plugins: self._setup_ovs_stats(connection) LOG.debug("Starting collectd to collect NFVi stats") - # ensure collectd.conf.d exists to avoid error/warning - connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d") - self._prepare_collectd_conf(config_file_path) - - # Reset amqp queue - LOG.debug("reset and setup amqp to collect data from collectd") - connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*") - connection.execute("sudo service rabbitmq-server start") - connection.execute("sudo rabbitmqctl stop_app") - 
connection.execute("sudo rabbitmqctl reset") - connection.execute("sudo rabbitmqctl start_app") - connection.execute("sudo service rabbitmq-server restart") - - LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd") - connection.execute("sudo rabbitmqctl delete_user guest") - connection.execute("sudo rabbitmqctl add_user admin admin") - connection.execute("sudo rabbitmqctl authenticate_user admin admin") - connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'") - LOG.debug("Start collectd service..... %s second timeout", self.timeout) # intel_pmu plug requires large numbers of files open, so try to set # ulimit -n to a large value @@ -299,9 +301,10 @@ class ResourceProfile(object): """ Start system agent for NFVi collection on host """ if self.enable: try: + self._start_rabbitmq(self.connection) self._start_collectd(self.connection, bin_path) - except Exception: - LOG.exception("Exception during collectd start") + except ResourceCommandError as e: + LOG.exception("Exception during collectd and rabbitmq start: %s", str(e)) raise def start(self): diff --git a/yardstick/network_services/traffic_profile/http_ixload.py b/yardstick/network_services/traffic_profile/http_ixload.py index 348056551..6cbdb8ab2 100644 --- a/yardstick/network_services/traffic_profile/http_ixload.py +++ b/yardstick/network_services/traffic_profile/http_ixload.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import -from __future__ import print_function - import sys import os import logging @@ -27,22 +24,14 @@ try: except ImportError: import json as jsonutils - -class ErrorClass(object): - - def __init__(self, *args, **kwargs): - if 'test' not in kwargs: - raise RuntimeError - - def __getattr__(self, item): - raise AttributeError - +from yardstick.common import exceptions try: from IxLoad import IxLoad, StatCollectorUtils except ImportError: - IxLoad = ErrorClass - StatCollectorUtils = ErrorClass + IxLoad = exceptions.ErrorClass + StatCollectorUtils = exceptions.ErrorClass + LOG = logging.getLogger(__name__) CSV_FILEPATH_NAME = 'IxL_statResults.csv' @@ -93,7 +82,7 @@ def validate_non_string_sequence(value, default=None, raise_exc=None): if isinstance(value, collections.Sequence) and not isinstance(value, str): return value if raise_exc: - raise raise_exc + raise raise_exc # pylint: disable=raising-bad-type return default @@ -218,7 +207,7 @@ class IXLOADHttpTest(object): # ---- Remap ports ---- try: self.reassign_ports(test, repository, self.ports_to_reassign) - except Exception: + except Exception: # pylint: disable=broad-except LOG.exception("Exception occurred during reassign_ports") # ----------------------------------------------------------------------- diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py index f3cafef7a..d9719eb4e 100644 --- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py @@ -22,7 +22,7 @@ LOG = logging.getLogger(__name__) # ACL should work the same on all systems, we can provide the binary ACL_PIPELINE_COMMAND = \ - 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}' + 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}' ACL_COLLECT_KPI = r"""\ ACL TOTAL:[^p]+pkts_processed"?:\s(\d+),[^p]+pkts_drop"?:\s(\d+),[^p]+pkts_received"?:\s(\d+),""" diff --git 
a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py index a776b0989..9ceac3167 100644 --- a/yardstick/network_services/vnf_generic/vnf/base.py +++ b/yardstick/network_services/vnf_generic/vnf/base.py @@ -195,6 +195,18 @@ class GenericVNF(object): :return: {"kpi": value, "kpi2": value} """ + @abc.abstractmethod + def start_collect(self): + """Start KPI collection + :return: None + """ + + @abc.abstractmethod + def stop_collect(self): + """Stop KPI collection + :return: None + """ + @six.add_metaclass(abc.ABCMeta) class GenericTrafficGen(GenericVNF): @@ -254,3 +266,23 @@ class GenericTrafficGen(GenericVNF): :return: True/False """ pass + + def start_collect(self): + """Start KPI collection. + + Traffic measurements are always collected during injection. + + Optional. + + :return: True/False + """ + pass + + def stop_collect(self): + """Stop KPI collection. + + Optional. + + :return: True/False + """ + pass diff --git a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py index 53f73b4d7..bfe628f09 100644 --- a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py @@ -21,10 +21,10 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, Dpd LOG = logging.getLogger(__name__) # CGNAPT should work the same on all systems, we can provide the binary -CGNAPT_PIPELINE_COMMAND = 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}' +CGNAPT_PIPELINE_COMMAND = 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}' WAIT_FOR_STATIC_NAPT = 4 -CGNAPT_COLLECT_KPI = """\ +CGNAPT_COLLECT_KPI = r"""\ CG-NAPT(.*\n)*\ Received\s(\d+),\ Missed\s(\d+),\ diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py index d8b9625fb..16873611e 100644 --- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py @@ -59,6 +59,7 @@ class SetupEnvHelper(object): self.vnfd_helper = vnfd_helper self.ssh_helper = ssh_helper self.scenario_helper = scenario_helper + self.collectd_options = {} def build_config(self): raise NotImplementedError @@ -192,11 +193,20 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): port_nums = self.vnfd_helper.port_nums(ports) # create mask from all the dpdk port numbers ports_mask_hex = hex(sum(2 ** num for num in port_nums)) + + vnf_cfg = self.scenario_helper.vnf_cfg + lb_config = vnf_cfg.get('lb_config', 'SW') + worker_threads = vnf_cfg.get('worker_threads', 3) + hwlb = '' + if lb_config == 'HW': + hwlb = ' --hwlb %s' % worker_threads + self.pipeline_kwargs = { 'cfg_file': self.CFG_CONFIG, 'script': self.CFG_SCRIPT, 'port_mask_hex': ports_mask_hex, 'tool_path': tool_path, + 'hwlb': hwlb, } def setup_vnf_environment(self): @@ -224,12 +234,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): if exit_status == 0: return - def get_collectd_options(self): - options = self.scenario_helper.all_options.get("collectd", {}) - # override with specific node settings - options.update(self.scenario_helper.options.get("collectd", {})) - return options - def _setup_resources(self): # what is this magic? how do we know which socket is for which port? # what about quad-socket? 
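The pipeline-command templates touched in this change (ACL above, and CGNAPT, vFW and vPE elsewhere in the diff) gain a trailing {hwlb} placeholder, which is filled in by the lb_config/worker_threads handling added to DpdkVnfSetupEnvHelper._build_pipeline_kwargs() in the hunk above. A minimal standalone sketch of how the final command is rendered; the binary path, port mask and file names below are made-up examples, not values from a real VNF descriptor:

    # Sketch only: mirrors the lb_config/worker_threads handling added to
    # _build_pipeline_kwargs(); the tool path, port mask and config paths are
    # illustrative placeholders.
    ACL_PIPELINE_COMMAND = ('sudo {tool_path} -p {port_mask_hex} '
                            '-f {cfg_file} -s {script} {hwlb}')

    def render_pipeline_command(vnf_cfg):
        lb_config = vnf_cfg.get('lb_config', 'SW')        # 'SW' is the default
        worker_threads = vnf_cfg.get('worker_threads', 3)
        hwlb = ''
        if lb_config == 'HW':
            hwlb = ' --hwlb %s' % worker_threads          # only added for HW LB
        return ACL_PIPELINE_COMMAND.format(
            tool_path='/opt/nsb_bin/vACL',
            port_mask_hex='0x3',
            cfg_file='/tmp/acl_config',
            script='/tmp/acl_script',
            hwlb=hwlb)

    print(render_pipeline_command({}))
    # sudo /opt/nsb_bin/vACL -p 0x3 -f /tmp/acl_config -s /tmp/acl_script
    print(render_pipeline_command({'lb_config': 'HW', 'worker_threads': 2}))
    # sudo /opt/nsb_bin/vACL -p 0x3 -f /tmp/acl_config -s /tmp/acl_script  --hwlb 2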
@@ -242,11 +246,11 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): # this won't work because we don't have DPDK port numbers yet ports = sorted(self.vnfd_helper.interfaces, key=self.vnfd_helper.port_num) port_names = (intf["name"] for intf in ports) - collectd_options = self.get_collectd_options() - plugins = collectd_options.get("plugins", {}) + plugins = self.collectd_options.get("plugins", {}) + interval = self.collectd_options.get("interval") # we must set timeout to be the same as the VNF otherwise KPIs will die before VNF return ResourceProfile(self.vnfd_helper.mgmt_interface, port_names=port_names, - plugins=plugins, interval=collectd_options.get("interval"), + plugins=plugins, interval=interval, timeout=self.scenario_helper.timeout) def _check_interface_fields(self): @@ -666,6 +670,7 @@ class SampleVNF(GenericVNF): pass def instantiate(self, scenario_cfg, context_cfg): + self._update_collectd_options(scenario_cfg, context_cfg) self.scenario_helper.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name]) @@ -677,6 +682,54 @@ class SampleVNF(GenericVNF): self.resource_helper.setup() self._start_vnf() + def _update_collectd_options(self, scenario_cfg, context_cfg): + """Update collectd configuration options + This function retrieves all collectd options contained in the test case + + definition builds a single dictionary combining them. The following fragment + represents a test case with the collectd options and priorities (1 highest, 3 lowest): + --- + schema: yardstick:task:0.1 + scenarios: + - type: NSPerf + nodes: + tg__0: trafficgen_1.yardstick + vnf__0: vnf.yardstick + options: + collectd: + <options> # COLLECTD priority 3 + vnf__0: + collectd: + plugins: + load + <options> # COLLECTD priority 2 + context: + type: Node + name: yardstick + nfvi_type: baremetal + file: /etc/yardstick/nodes/pod_ixia.yaml # COLLECTD priority 1 + """ + scenario_options = scenario_cfg.get('options', {}) + generic_options = scenario_options.get('collectd', {}) + scenario_node_options = scenario_options.get(self.name, {})\ + .get('collectd', {}) + context_node_options = context_cfg.get('nodes', {})\ + .get(self.name, {}).get('collectd', {}) + + options = generic_options + self._update_options(options, scenario_node_options) + self._update_options(options, context_node_options) + + self.setup_helper.collectd_options = options + + def _update_options(self, options, additional_options): + """Update collectd options and plugins dictionary""" + for k, v in additional_options.items(): + if isinstance(v, dict) and k in options: + options[k].update(v) + else: + options[k] = v + def wait_for_instantiate(self): buf = [] time.sleep(self.WAIT_TIME) # Give some time for config to load @@ -692,7 +745,6 @@ class SampleVNF(GenericVNF): LOG.info("%s VNF is up and running.", self.APP_NAME) self._vnf_up_post() self.queue_wrapper.clear() - self.resource_helper.start_collect() return self._vnf_process.exitcode if "PANIC" in message: @@ -705,6 +757,12 @@ class SampleVNF(GenericVNF): # by other VNF output self.q_in.put('\r\n') + def start_collect(self): + self.resource_helper.start_collect() + + def stop_collect(self): + self.resource_helper.stop_collect() + def _build_run_kwargs(self): self.run_kwargs = { 'stdin': self.queue_wrapper, diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py index 265d0b7a9..94ab69891 100644 --- 
a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py @@ -12,15 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import - import time import os import logging import sys +from yardstick.common import exceptions from yardstick.common import utils -from yardstick import error from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper @@ -36,7 +34,7 @@ sys.path.append(IXNET_LIB) try: from IxNet import IxNextgen except ImportError: - IxNextgen = error.ErrorClass + IxNextgen = exceptions.ErrorClass class IxiaRfc2544Helper(Rfc2544ResourceHelper): diff --git a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py index 61e99855f..3ba1f91b7 100644 --- a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py @@ -21,7 +21,7 @@ from yardstick.network_services.yang_model import YangModel LOG = logging.getLogger(__name__) # vFW should work the same on all systems, we can provide the binary -FW_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}""" +FW_PIPELINE_COMMAND = "sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}" FW_COLLECT_KPI = (r"""VFW TOTAL:[^p]+pkts_received"?:\s(\d+),[^p]+pkts_fw_forwarded"?:\s(\d+),""" r"""[^p]+pkts_drop_fw"?:\s(\d+),\s""") diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py index 077ce2385..0067f6bf9 100644 --- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py @@ -31,7 +31,7 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, Dpd LOG = logging.getLogger(__name__) -VPE_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}""" +VPE_PIPELINE_COMMAND = "sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}" VPE_COLLECT_KPI = """\ Pkts in:\\s(\\d+)\r\n\ diff --git a/yardstick/ssh.py b/yardstick/ssh.py index d7adc0d05..6b5e6faf4 100644 --- a/yardstick/ssh.py +++ b/yardstick/ssh.py @@ -62,15 +62,13 @@ Eventlet: sshclient = eventlet.import_patched("yardstick.ssh") """ -from __future__ import absolute_import -import os import io +import logging +import os +import re import select import socket import time -import re - -import logging import paramiko from chainmap import ChainMap @@ -78,6 +76,7 @@ from oslo_utils import encodeutils from scp import SCPClient import six +from yardstick.common import exceptions from yardstick.common.utils import try_int, NON_NONE_DEFAULT, make_dict_from_map from yardstick.network_services.utils import provision_tool @@ -90,12 +89,12 @@ def convert_key_to_str(key): return k.getvalue() -class SSHError(Exception): - pass - - -class SSHTimeout(SSHError): - pass +# class SSHError(Exception): +# pass +# +# +# class SSHTimeout(SSHError): +# pass class SSH(object): @@ -193,7 +192,7 @@ class SSH(object): return key_class.from_private_key(key) except paramiko.SSHException as e: errors.append(e) - raise SSHError("Invalid pkey: %s" % errors) + raise exceptions.SSHError(error_msg='Invalid pkey: %s' % errors) @property def 
is_connected(self): @@ -214,10 +213,10 @@ class SSH(object): return self._client except Exception as e: message = ("Exception %(exception_type)s was raised " - "during connect. Exception value is: %(exception)r") + "during connect. Exception value is: %(exception)r" % + {"exception": e, "exception_type": type(e)}) self._client = False - raise SSHError(message % {"exception": e, - "exception_type": type(e)}) + raise exceptions.SSHError(error_msg=message) def _make_dict(self): return { @@ -334,11 +333,11 @@ class SSH(object): break if timeout and (time.time() - timeout) > start_time: - args = {"cmd": cmd, "host": self.host} - raise SSHTimeout("Timeout executing command " - "'%(cmd)s' on host %(host)s" % args) + message = ('Timeout executing command %(cmd)s on host %(host)s' + % {"cmd": cmd, "host": self.host}) + raise exceptions.SSHTimeout(error_msg=message) if e: - raise SSHError("Socket error.") + raise exceptions.SSHError(error_msg='Socket error') exit_status = session.recv_exit_status() if exit_status != 0 and raise_on_error: @@ -346,7 +345,7 @@ class SSH(object): details = fmt % {"cmd": cmd, "status": exit_status} if stderr_data: details += " Last stderr data: '%s'." % stderr_data - raise SSHError(details) + raise exceptions.SSHError(error_msg=details) return exit_status def execute(self, cmd, stdin=None, timeout=3600): @@ -377,11 +376,12 @@ class SSH(object): while True: try: return self.execute("uname") - except (socket.error, SSHError) as e: + except (socket.error, exceptions.SSHError) as e: self.log.debug("Ssh is still unavailable: %r", e) time.sleep(interval) if time.time() > end_time: - raise SSHTimeout("Timeout waiting for '%s'" % self.host) + raise exceptions.SSHTimeout( + error_msg='Timeout waiting for "%s"' % self.host) def put(self, files, remote_path=b'.', recursive=False): client = self._get_client() @@ -486,11 +486,12 @@ class AutoConnectSSH(SSH): while True: try: return self._get_client() - except (socket.error, SSHError) as e: + except (socket.error, exceptions.SSHError) as e: self.log.debug("Ssh is still unavailable: %r", e) time.sleep(interval) if time.time() > end_time: - raise SSHTimeout("Timeout waiting for '%s'" % self.host) + raise exceptions.SSHTimeout( + error_msg='Timeout waiting for "%s"' % self.host) def drop_connection(self): """ Don't close anything, just force creation of a new client """ diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py index e70ab0ae8..b426985be 100644 --- a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py +++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py @@ -242,18 +242,19 @@ class SriovContextTestCase(unittest.TestCase): self.assertIsNone(self.sriov.configure_nics_for_sriov()) @mock.patch.object(ssh, 'SSH', return_value=(0, "a", "")) - @mock.patch.object(model, 'Libvirt') - def test__enable_interfaces(self, mock_libvirt, mock_ssh): - # pylint: disable=unused-argument - # NOTE(ralonsoh): the pylint exception should be removed. 
+ @mock.patch.object(model.Libvirt, 'add_sriov_interfaces', + return_value='out_xml') + def test__enable_interfaces(self, mock_add_sriov, mock_ssh): self.sriov.vm_deploy = True self.sriov.connection = mock_ssh self.sriov.vm_names = ['vm_0', 'vm_1'] self.sriov.drivers = [] self.sriov.networks = self.NETWORKS - self.sriov.get_vf_data = mock.Mock(return_value="") - self.assertIsNone(self.sriov._enable_interfaces( - 0, 0, ["private_0"], 'test')) + self.assertEqual( + 'out_xml', + self.sriov._enable_interfaces(0, 0, ['private_0'], 'test')) + mock_add_sriov.assert_called_once_with( + '0000:00:0a.0', 0, self.NETWORKS['private_0']['mac'], 'test') @mock.patch.object(model.Libvirt, 'build_vm_xml') @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete') @@ -282,7 +283,9 @@ class SriovContextTestCase(unittest.TestCase): mock_build_vm_xml.return_value = (xml_out, '00:00:00:00:00:01') with mock.patch.object(self.sriov, 'vnf_node') as mock_vnf_node, \ - mock.patch.object(self.sriov, '_enable_interfaces'): + mock.patch.object(self.sriov, '_enable_interfaces') as \ + mock_enable_interfaces: + mock_enable_interfaces.return_value = 'out_xml' mock_vnf_node.generate_vnf_instance = mock.Mock( return_value='node') nodes_out = self.sriov.setup_sriov_context() @@ -294,7 +297,10 @@ class SriovContextTestCase(unittest.TestCase): connection, 'flavor', vm_name, 0) mock_create_vm.assert_called_once_with(connection, cfg) mock_check.assert_called_once_with(vm_name, connection) - mock_write_file.assert_called_once_with(cfg, xml_out) + mock_write_file.assert_called_once_with(cfg, 'out_xml') + mock_enable_interfaces.assert_has_calls([ + mock.call(0, mock.ANY, ['private_0'], mock.ANY), + mock.call(0, mock.ANY, ['public_0'], mock.ANY)], any_order=True) def test__get_vf_data(self): with mock.patch("yardstick.ssh.SSH") as ssh: diff --git a/yardstick/tests/unit/benchmark/contexts/test_base.py b/yardstick/tests/unit/benchmark/contexts/test_base.py index 153c6a527..1311317ac 100644 --- a/yardstick/tests/unit/benchmark/contexts/test_base.py +++ b/yardstick/tests/unit/benchmark/contexts/test_base.py @@ -12,12 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
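The collectd option handling added to SampleVNF._update_collectd_options() earlier in this diff merges three sources with the priorities documented in its docstring (context node over scenario node over generic scenario options). A small self-contained sketch of that merge; the scenario and context fragments are hypothetical and only shaped like the docstring example:

    # Sketch of the merge performed by _update_collectd_options() and
    # _update_options(); the input dictionaries are invented for illustration.
    def update_options(options, additional_options):
        # dict values are merged one level deep, scalar values are overridden
        for key, value in additional_options.items():
            if isinstance(value, dict) and key in options:
                options[key].update(value)
            else:
                options[key] = value

    name = 'vnf__0'
    scenario_options = {
        'collectd': {'interval': 10, 'plugins': {'cpu': {}}},   # priority 3
        name: {'collectd': {'plugins': {'ovs_stats': {}}}},     # priority 2
    }
    context_nodes = {
        name: {'collectd': {'interval': 5}},                    # priority 1
    }

    options = scenario_options.get('collectd', {})
    update_options(options, scenario_options.get(name, {}).get('collectd', {}))
    update_options(options, context_nodes.get(name, {}).get('collectd', {}))

    print(options)
    # {'interval': 5, 'plugins': {'cpu': {}, 'ovs_stats': {}}}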
-import unittest - from yardstick.benchmark.contexts import base +from yardstick.tests.unit import base as ut_base + + +class DummyContextClass(base.Context): + + def _get_network(self, *args): + pass + + def _get_server(self, *args): + pass + + def deploy(self): + pass + + def undeploy(self): + pass -class FlagsTestCase(unittest.TestCase): +class FlagsTestCase(ut_base.BaseUnitTestCase): def setUp(self): self.flags = base.Flags() @@ -41,3 +55,32 @@ class FlagsTestCase(unittest.TestCase): self.flags.parse(foo=42) with self.assertRaises(AttributeError): _ = self.flags.foo + + +class ContextTestCase(ut_base.BaseUnitTestCase): + + @staticmethod + def _remove_ctx(ctx_obj): + if ctx_obj in base.Context.list: + base.Context.list.remove(ctx_obj) + + def test_split_host_name(self): + ctx_obj = DummyContextClass() + self.addCleanup(self._remove_ctx, ctx_obj) + config_name = 'host_name.ctx_name' + self.assertEqual(('host_name', 'ctx_name'), + ctx_obj.split_host_name(config_name)) + + def test_split_host_name_wrong_separator(self): + ctx_obj = DummyContextClass() + self.addCleanup(self._remove_ctx, ctx_obj) + config_name = 'host_name-ctx_name' + self.assertEqual((None, None), + ctx_obj.split_host_name(config_name)) + + def test_split_host_name_other_separator(self): + ctx_obj = DummyContextClass(host_name_separator='-') + self.addCleanup(self._remove_ctx, ctx_obj) + config_name = 'host_name-ctx_name' + self.assertEqual(('host_name', 'ctx_name'), + ctx_obj.split_host_name(config_name)) diff --git a/yardstick/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py index 625f97bf4..8febfc9de 100644 --- a/yardstick/tests/unit/benchmark/contexts/test_heat.py +++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py @@ -13,29 +13,26 @@ import logging import os import mock -import unittest +from yardstick import ssh from yardstick.benchmark.contexts import base from yardstick.benchmark.contexts import heat from yardstick.benchmark.contexts import model from yardstick.common import constants as consts from yardstick.common import exceptions as y_exc -from yardstick import ssh +from yardstick.tests.unit import base as ut_base LOG = logging.getLogger(__name__) -class HeatContextTestCase(unittest.TestCase): - - def __init__(self, *args, **kwargs): - super(HeatContextTestCase, self).__init__(*args, **kwargs) - self.name_iter = ('vnf{:03}'.format(x) for x in count(0, step=3)) +class HeatContextTestCase(ut_base.BaseUnitTestCase): def setUp(self): self.test_context = heat.HeatContext() self.addCleanup(self._remove_contexts) self.mock_context = mock.Mock(spec=heat.HeatContext()) + self.name_iter = ('vnf{:03}'.format(x) for x in count(0, step=3)) def _remove_contexts(self): if self.test_context in self.test_context.list: diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py index 9bfbf0752..2885dc6fb 100644 --- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py +++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py @@ -20,11 +20,11 @@ import mock import unittest from yardstick import tests +from yardstick.common import exceptions from yardstick.common import utils from yardstick.network_services.collector.subscriber import Collector from yardstick.network_services.traffic_profile import base from yardstick.network_services.vnf_generic import vnfdgen -from yardstick.error import IncorrectConfig from 
yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen from yardstick.network_services.vnf_generic.vnf.base import GenericVNF @@ -423,7 +423,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): with mock.patch.dict(sys.modules, tests.STL_MOCKS): self.assertIsNotNone(self.s.get_vnf_impl(vnfd)) - with self.assertRaises(vnf_generic.IncorrectConfig) as raised: + with self.assertRaises(exceptions.IncorrectConfig) as raised: self.s.get_vnf_impl('NonExistentClass') exc_str = str(raised.exception) @@ -465,7 +465,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): cfg_patch = mock.patch.object(self.s, 'context_cfg', cfg) with cfg_patch: - with self.assertRaises(IncorrectConfig): + with self.assertRaises(exceptions.IncorrectConfig): self.s.map_topology_to_infrastructure() def test_map_topology_to_infrastructure_config_invalid(self): @@ -482,7 +482,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): config_patch = mock.patch.object(self.s, 'context_cfg', cfg) with config_patch: - with self.assertRaises(IncorrectConfig): + with self.assertRaises(exceptions.IncorrectConfig): self.s.map_topology_to_infrastructure() def test__resolve_topology_invalid_config(self): @@ -496,7 +496,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): for interface in self.tg__1['interfaces'].values(): del interface['local_mac'] - with self.assertRaises(vnf_generic.IncorrectConfig) as raised: + with self.assertRaises(exceptions.IncorrectConfig) as raised: self.s._resolve_topology() self.assertIn('not found', str(raised.exception)) @@ -509,7 +509,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): self.s.topology["vld"][0]['vnfd-connection-point-ref'].append( self.s.topology["vld"][0]['vnfd-connection-point-ref'][0]) - with self.assertRaises(vnf_generic.IncorrectConfig) as raised: + with self.assertRaises(exceptions.IncorrectConfig) as raised: self.s._resolve_topology() self.assertIn('wrong endpoint count', str(raised.exception)) @@ -518,7 +518,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): self.s.topology["vld"][0]['vnfd-connection-point-ref'] = \ self.s.topology["vld"][0]['vnfd-connection-point-ref'][:1] - with self.assertRaises(vnf_generic.IncorrectConfig) as raised: + with self.assertRaises(exceptions.IncorrectConfig) as raised: self.s._resolve_topology() self.assertIn('wrong endpoint count', str(raised.exception)) diff --git a/yardstick/tests/unit/common/test_exceptions.py b/yardstick/tests/unit/common/test_exceptions.py new file mode 100644 index 000000000..884015536 --- /dev/null +++ b/yardstick/tests/unit/common/test_exceptions.py @@ -0,0 +1,28 @@ +# Copyright 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
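The call sites in this change raise the consolidated exception classes with keyword arguments, for example IncorrectSetup(error_msg=...), ResourceCommandError(command=..., stderr=...) and SSHError(error_msg=...), rather than with preformatted strings. The class definitions in yardstick/common/exceptions.py are not part of this diff; the sketch below only illustrates the keyword-formatting pattern those call sites imply, and both the base-class shape and the message template are assumptions:

    # Illustration only: not the real yardstick.common.exceptions code.
    # The base class and msg_fmt text are assumed, inferred from the keyword
    # arguments used at the call sites in this change.
    class _BaseException(Exception):
        msg_fmt = 'An unknown exception occurred.'

        def __init__(self, **kwargs):
            # render the message from keyword arguments, e.g. error_msg=...
            super(_BaseException, self).__init__(self.msg_fmt % kwargs)


    class _ResourceCommandError(_BaseException):
        msg_fmt = 'Command "%(command)s" failed, stderr: "%(stderr)s"'


    try:
        raise _ResourceCommandError(command='sudo rabbitmqctl start_app',
                                    stderr='node is down')
    except _BaseException as exc:
        print(exc)   # Command "sudo rabbitmqctl start_app" failed, ...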
+ +from yardstick.common import exceptions +from yardstick.tests.unit import base as ut_base + + +class ErrorClassTestCase(ut_base.BaseUnitTestCase): + + def test_init(self): + with self.assertRaises(RuntimeError): + exceptions.ErrorClass() + + def test_getattr(self): + error_instance = exceptions.ErrorClass(test='') + with self.assertRaises(AttributeError): + error_instance.get_name() diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py index 9540a39e8..c61a95f3b 100644 --- a/yardstick/tests/unit/common/test_utils.py +++ b/yardstick/tests/unit/common/test_utils.py @@ -20,9 +20,8 @@ import unittest import yardstick from yardstick import ssh -import yardstick.error -from yardstick.common import utils from yardstick.common import constants +from yardstick.common import utils class IterSubclassesTestCase(unittest.TestCase): @@ -987,14 +986,6 @@ class TestUtils(unittest.TestCase): with self.assertRaises(RuntimeError): utils.validate_non_string_sequence(1, raise_exc=RuntimeError) - def test_error_class(self): - with self.assertRaises(RuntimeError): - yardstick.error.ErrorClass() - - error_instance = yardstick.error.ErrorClass(test='') - with self.assertRaises(AttributeError): - error_instance.get_name() - class TestUtilsIpAddrMethods(unittest.TestCase): diff --git a/yardstick/tests/unit/network_services/collector/test_subscriber.py b/yardstick/tests/unit/network_services/collector/test_subscriber.py index 9b9649979..4ec2ea7a2 100644 --- a/yardstick/tests/unit/network_services/collector/test_subscriber.py +++ b/yardstick/tests/unit/network_services/collector/test_subscriber.py @@ -17,6 +17,7 @@ import unittest import mock from yardstick.network_services.collector import subscriber +from yardstick import ssh class MockVnfAprrox(object): @@ -37,57 +38,41 @@ class MockVnfAprrox(object): class CollectorTestCase(unittest.TestCase): - NODES = { - 'node1': {}, - 'node2': { - 'ip': '1.2.3.4', - 'collectd': { - 'plugins': {'abc': 12, 'def': 34}, - 'interval': 987, - }, - }, - } - TRAFFIC_PROFILE = { - 'key1': 'value1', - } - def setUp(self): vnf = MockVnfAprrox() - self.ssh_patch = mock.patch('yardstick.network_services.nfvi.resource.ssh', autospec=True) + vnf.start_collect = mock.Mock() + vnf.stop_collect = mock.Mock() + self.ssh_patch = mock.patch.object(ssh, 'AutoConnectSSH') mock_ssh = self.ssh_patch.start() mock_instance = mock.Mock() mock_instance.execute.return_value = 0, '', '' - mock_ssh.AutoConnectSSH.from_node.return_value = mock_instance - self.collector = subscriber.Collector([vnf], self.NODES, self.TRAFFIC_PROFILE, 1800) + mock_ssh.from_node.return_value = mock_instance + self.collector = subscriber.Collector([vnf]) def tearDown(self): self.ssh_patch.stop() def test___init__(self, *_): vnf = MockVnfAprrox() - collector = subscriber.Collector([vnf], {}, {}) + collector = subscriber.Collector([vnf]) self.assertEqual(len(collector.vnfs), 1) - self.assertEqual(collector.traffic_profile, {}) - - def test___init___with_data(self, *_): - self.assertEqual(len(self.collector.vnfs), 1) - self.assertDictEqual(self.collector.traffic_profile, self.TRAFFIC_PROFILE) - self.assertEqual(len(self.collector.resource_profiles), 1) - - def test___init___negative(self, *_): - pass def test_start(self, *_): - self.assertIsNone(self.collector.start()) + self.collector.start() + for vnf in self.collector.vnfs: + vnf.start_collect.assert_called_once() def test_stop(self, *_): self.assertIsNone(self.collector.stop()) + for vnf in self.collector.vnfs: + 
vnf.stop_collect.assert_called_once() def test_get_kpi(self, *_): result = self.collector.get_kpi() + self.assertEqual(1, len(result)) + self.assertEqual(4, len(result["vnf__1"])) self.assertEqual(result["vnf__1"]["pkt_in_up_stream"], 100) self.assertEqual(result["vnf__1"]["pkt_drop_up_stream"], 5) self.assertEqual(result["vnf__1"]["pkt_in_down_stream"], 50) self.assertEqual(result["vnf__1"]["pkt_drop_down_stream"], 40) - self.assertIn('node2', result) diff --git a/yardstick/tests/unit/service/test_environment.py b/yardstick/tests/unit/service/test_environment.py index 4af9a3958..be4882e30 100644 --- a/yardstick/tests/unit/service/test_environment.py +++ b/yardstick/tests/unit/service/test_environment.py @@ -6,16 +6,16 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -import unittest import mock +from yardstick.common.exceptions import UnsupportedPodFormatError from yardstick.service.environment import Environment from yardstick.service.environment import AnsibleCommon -from yardstick.common.exceptions import UnsupportedPodFormatError +from yardstick.tests.unit import base as ut_base -class EnvironmentTestCase(unittest.TestCase): +class EnvironmentTestCase(ut_base.BaseUnitTestCase): def test_get_sut_info(self): pod_info = { @@ -31,11 +31,11 @@ class EnvironmentTestCase(unittest.TestCase): ] } - AnsibleCommon.gen_inventory_ini_dict = mock.MagicMock() - AnsibleCommon.get_sut_info = mock.MagicMock(return_value={'node1': {}}) - - env = Environment(pod=pod_info) - env.get_sut_info() + with mock.patch.object(AnsibleCommon, 'gen_inventory_ini_dict'), \ + mock.patch.object(AnsibleCommon, 'get_sut_info', + return_value={'node1': {}}): + env = Environment(pod=pod_info) + env.get_sut_info() def test_get_sut_info_pod_str(self): pod_info = 'nodes' @@ -43,7 +43,3 @@ class EnvironmentTestCase(unittest.TestCase): env = Environment(pod=pod_info) with self.assertRaises(UnsupportedPodFormatError): env.get_sut_info() - - -if __name__ == '__main__': - unittest.main() diff --git a/yardstick/tests/unit/test_ssh.py b/yardstick/tests/unit/test_ssh.py index f92290070..080d27837 100644 --- a/yardstick/tests/unit/test_ssh.py +++ b/yardstick/tests/unit/test_ssh.py @@ -13,10 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
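The rewritten subscriber tests above reflect the new KPI collection lifecycle: Collector is now constructed from the VNF list alone, and its start()/stop() delegate to the start_collect()/stop_collect() hooks added to GenericVNF earlier in this diff (SampleVNF forwards them to its resource helper, traffic generators default to no-ops). A stand-in sketch of that call pattern; the two classes below are illustrative, not the production Collector or SampleVNF:

    # Stand-in objects only: shows the call pattern exercised by
    # CollectorTestCase above, not the production classes.
    class FakeVNF(object):
        name = 'vnf__0'

        def start_collect(self):
            # SampleVNF.start_collect() starts the collectd ResourceProfile here
            print('%s: KPI collection started' % self.name)

        def stop_collect(self):
            print('%s: KPI collection stopped' % self.name)

        def collect_kpi(self):
            return {'pkt_in_up_stream': 100, 'pkt_drop_up_stream': 5}


    class MiniCollector(object):
        def __init__(self, vnfs):
            self.vnfs = vnfs

        def start(self):
            for vnf in self.vnfs:
                vnf.start_collect()

        def stop(self):
            for vnf in self.vnfs:
                vnf.stop_collect()

        def get_kpi(self):
            return {vnf.name: vnf.collect_kpi() for vnf in self.vnfs}


    collector = MiniCollector([FakeVNF()])
    collector.start()
    print(collector.get_kpi())
    collector.stop()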
-# yardstick comment: this file is a modified copy of -# rally/tests/unit/common/test_sshutils.py - -from __future__ import absolute_import import os import socket import unittest @@ -26,8 +22,8 @@ from itertools import count import mock from oslo_utils import encodeutils +from yardstick.common import exceptions from yardstick import ssh -from yardstick.ssh import SSHError, SSHTimeout from yardstick.ssh import SSH from yardstick.ssh import AutoConnectSSH @@ -127,7 +123,7 @@ class SSHTestCase(unittest.TestCase): dss = mock_paramiko.dsskey.DSSKey rsa.from_private_key.side_effect = mock_paramiko.SSHException dss.from_private_key.side_effect = mock_paramiko.SSHException - self.assertRaises(ssh.SSHError, self.test_client._get_pkey, "key") + self.assertRaises(exceptions.SSHError, self.test_client._get_pkey, "key") @mock.patch("yardstick.ssh.six.moves.StringIO") @mock.patch("yardstick.ssh.paramiko") @@ -194,7 +190,7 @@ class SSHTestCase(unittest.TestCase): test_ssh = ssh.SSH("admin", "example.net", pkey="key") - with self.assertRaises(SSHError) as raised: + with self.assertRaises(exceptions.SSHError) as raised: test_ssh._get_client() self.assertEqual(mock_paramiko.SSHClient.call_count, 1) @@ -245,18 +241,18 @@ class SSHTestCase(unittest.TestCase): @mock.patch("yardstick.ssh.time") def test_wait_timeout(self, mock_time): mock_time.time.side_effect = [1, 50, 150] - self.test_client.execute = mock.Mock(side_effect=[ssh.SSHError, - ssh.SSHError, + self.test_client.execute = mock.Mock(side_effect=[exceptions.SSHError, + exceptions.SSHError, 0]) - self.assertRaises(ssh.SSHTimeout, self.test_client.wait) + self.assertRaises(exceptions.SSHTimeout, self.test_client.wait) self.assertEqual([mock.call("uname")] * 2, self.test_client.execute.mock_calls) @mock.patch("yardstick.ssh.time") def test_wait(self, mock_time): mock_time.time.side_effect = [1, 50, 100] - self.test_client.execute = mock.Mock(side_effect=[ssh.SSHError, - ssh.SSHError, + self.test_client.execute = mock.Mock(side_effect=[exceptions.SSHError, + exceptions.SSHError, 0]) self.test_client.wait() self.assertEqual([mock.call("uname")] * 3, @@ -333,7 +329,7 @@ class SSHRunTestCase(unittest.TestCase): def test_run_nonzero_status(self, mock_select): mock_select.select.return_value = ([], [], []) self.fake_session.recv_exit_status.return_value = 1 - self.assertRaises(ssh.SSHError, self.test_client.run, "cmd") + self.assertRaises(exceptions.SSHError, self.test_client.run, "cmd") self.assertEqual(1, self.test_client.run("cmd", raise_on_error=False)) @mock.patch("yardstick.ssh.select") @@ -401,7 +397,7 @@ class SSHRunTestCase(unittest.TestCase): def test_run_select_error(self, mock_select): self.fake_session.exit_status_ready.return_value = False mock_select.select.return_value = ([], [], [True]) - self.assertRaises(ssh.SSHError, self.test_client.run, "cmd") + self.assertRaises(exceptions.SSHError, self.test_client.run, "cmd") @mock.patch("yardstick.ssh.time") @mock.patch("yardstick.ssh.select") @@ -409,7 +405,7 @@ class SSHRunTestCase(unittest.TestCase): mock_time.time.side_effect = [1, 3700] mock_select.select.return_value = ([], [], []) self.fake_session.exit_status_ready.return_value = False - self.assertRaises(ssh.SSHTimeout, self.test_client.run, "cmd") + self.assertRaises(exceptions.SSHTimeout, self.test_client.run, "cmd") @mock.patch("yardstick.ssh.open", create=True) def test__put_file_shell(self, mock_open): @@ -529,9 +525,9 @@ class TestAutoConnectSSH(unittest.TestCase): auto_connect_ssh = AutoConnectSSH('user1', 'host1', wait=10) 
        auto_connect_ssh._get_client = mock__get_client = mock.Mock()
-       mock__get_client.side_effect = SSHError
+       mock__get_client.side_effect = exceptions.SSHError

-       with self.assertRaises(SSHTimeout):
+       with self.assertRaises(exceptions.SSHTimeout):
            auto_connect_ssh._connect()

        self.assertEqual(mock_time.time.call_count, 12)
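With SSHError and SSHTimeout now raised from yardstick.common.exceptions (the old classes in yardstick/ssh.py are retired above), callers catch connection failures as sketched below. Host, user and key path are placeholders:

    # Usage sketch with placeholder credentials; note that the exceptions are
    # imported from yardstick.common.exceptions, no longer from yardstick.ssh.
    from yardstick.common import exceptions
    from yardstick import ssh


    def run_uname(host, user, key_filename):
        client = ssh.SSH(user, host, key_filename=key_filename)
        try:
            client.wait()  # retries until the host answers, or raises SSHTimeout
            status, stdout, _ = client.execute('uname -a')
            return status, stdout
        except exceptions.SSHTimeout:
            # host never became reachable within the wait period
            raise
        except exceptions.SSHError as exc:
            # connection-level failure (bad key, socket error, ...)
            raise RuntimeError('SSH failure: %s' % exc)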