diff options
46 files changed, 1924 insertions, 352 deletions
@@ -22,6 +22,7 @@ ross.b.brattain@intel.com chenjiankun1@huawei.com rodolfo.alonso.hernandez@intel.com emma.l.foley@intel.com +abhijit.sinha@intel.com Link to TSC approval: http://meetbot.opnfv.org/meetings/ Link to approval of additional submitters: @@ -74,6 +74,10 @@ committers: email: '14_ykl@tongji.edu.cn' company: 'tongji.edu.cn' id: 'tjuyinkanglin' + - name: 'Abhijit Sinha' + email: 'abhijit.sinha@intel.com' + company: 'intel.com' + id: 'abhijitsinha' tsc: # yamllint disable rule:line-length approval: 'http://meetbot.opnfv.org/meetings/' diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml new file mode 100644 index 000000000..5e43ee81e --- /dev/null +++ b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml @@ -0,0 +1,29 @@ +# Copyright (c) 2017-2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +# Ignore errors as VM can be destroyed without being undefined. 
+- name: Destroy old VMs + virt: + command: destroy + name: "{{ node_item.hostname }}" + when: node_item.hostname in virt_vms.list_vms + ignore_errors: yes + +# Ignore errors as VM can be running while undefined +- name: Undefine old VMs + virt: + command: undefine + name: "{{ node_item.hostname }}" + when: node_item.hostname in virt_vms.list_vms + ignore_errors: yes diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml index 5595cd501..e6c2c0229 100644 --- a/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml +++ b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml @@ -26,17 +26,10 @@ register: virt_vms - name: Destroy old VMs - virt: - command: destroy - name: "{{ item.hostname }}" - when: item.hostname in virt_vms.list_vms - with_items: "{{ infra_deploy_vars.nodes }}" - -- name: Undefine old VMs - virt: - command: undefine - name: "{{ item.hostname }}" - when: item.hostname in virt_vms.list_vms + include_tasks: delete_vm.yml + extra_vars: "{{ virt_vms }}" + loop_control: + loop_var: node_item with_items: "{{ infra_deploy_vars.nodes }}" - name: Delete old networks diff --git a/docker/Dockerfile b/docker/Dockerfile index ddd8dfaf8..46e52d557 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -26,7 +26,7 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick" \ RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean RUN easy_install -U setuptools==30.0.0 -RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 +RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 RUN mkdir -p ${REPOS_DIR} diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch index ca933514a..24e3952fb 100644 --- a/docker/Dockerfile.aarch64.patch +++ b/docker/Dockerfile.aarch64.patch @@ -39,7 +39,7 @@ index 
2ee5b4c..23e5ea5 100644 +RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && \ + apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean RUN easy_install -U setuptools==30.0.0 - RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 + RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 @@ -43,8 +44,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf diff --git a/docs/testing/developer/devguide/devguide_nsb_prox.rst b/docs/testing/developer/devguide/devguide_nsb_prox.rst new file mode 100755 index 000000000..fc533b2cf --- /dev/null +++ b/docs/testing/developer/devguide/devguide_nsb_prox.rst @@ -0,0 +1,1226 @@ +Introduction +============= + +This document describes the steps to create a new NSB PROX test based on +existing PROX functionalities. NSB PROX provides a simple approximation +of an operation and can be used to develop best practices and TCO models +for Telco customers, investigate the impact of new Intel compute, +network and storage technologies, characterize performance, and develop +optimal system architectures and configurations. + +.. contents:: + +Prerequisites +============= + +In order to integrate PROX tests into NSB, the following prerequisites are required. + +.. _`dpdk wiki page`: http://dpdk.org/ +.. _`yardstick wiki page`: https://wiki.opnfv.org/display/yardstick/ +.. _`Prox documentation`: https://01.org/intel-data-plane-performance-demonstrators/documentation/prox-documentation +.. _`openstack wiki page`: https://wiki.openstack.org/wiki/Main_Page +.. _`grafana getting started`: http://docs.grafana.org/guides/gettingstarted/ +.. _`opnfv grafana dashboard`: https://wiki.opnfv.org/display/yardstick/How+to+work+with+grafana+dashboard +.. _`Prox command line`: https://01.org/intel-data-plane-performance-demonstrators/documentation/prox-documentation#Command_line_options +.. 
_`grafana deployment`: https://wiki.opnfv.org/display/yardstick/How+to+deploy+InfluxDB+and+Grafana+locally +.. _`Prox options`: https://01.org/intel-data-plane-performance-demonstrators/documentation/prox-documentation#.5Beal_options.5D +.. _`NSB Installation`: http://artifacts.opnfv.org/yardstick/docs/userguide/index.html#document-09-installation + +* A working knowledge of Yardstick. See `yardstick wiki page`_. +* A working knowledge of PROX. See `Prox documentation`_. +* Knowledge of Openstack. See `openstack wiki page`_. +* Knowledge of how to use Grafana. See `grafana getting started`_. +* How to Deploy InfluxDB & Grafana. See `grafana deployment`_. +* How to use Grafana in OPNFV/Yardstick. See `opnfv grafana dashboard`_. +* How to install NSB. See `NSB Installation`_ + +Sample Prox Test Hardware Architecture +====================================== + +The following is a diagram of a sample NSB PROX Hardware Architecture +for both NSB PROX on Bare metal and on Openstack. + +In this example when running yardstick on baremetal, yardstick will +run on the deployment node, the generator will run on the deployment node +and the SUT(SUT) will run on the Controller Node. + + +.. image:: images/PROX_Hardware_Arch.png + :width: 800px + :alt: Sample NSB PROX Hard Architecture + +Prox Test Architecture +====================== + +In order to create a new test, one must understand the architecture of +the test. + +A NSB Prox test architecture is composed of: + +* A traffic generator. This provides blocks of data on 1 or more ports + to the SUT. + The traffic generator also consumes the result packets from the system + under test. +* A SUT consumes the packets generated by the packet + generator, and applies one or more tasks to the packets and return the + modified packets to the traffic generator. + + This is an example of a sample NSB PROX test architecture. + +.. 
image:: images/PROX_Software_Arch.png + :width: 800px + :alt: NSB PROX test Architecture + +This diagram is of a sample NSB PROX test application. + +* Traffic Generator + + * Generator Tasks - Composed of 1 or more tasks (It is possible to + have multiple tasks sending packets to same port No. See Tasks Ai and Aii + plus Di and Dii) + + * Task Ai - Generates Packets on Port 0 of Traffic Generator + and send to Port 0 of SUT Port 0 + * Task Aii - Generates Packets on Port 0 of Traffic Generator + and send to Port 0 of SUT Port 0 + * Task B - Generates Packets on Port 1 of Traffic Generator + and send to Port 1 of SUT Port 1 + * Task C - Generates Packets on Port 2 of Traffic Generator + and send to Port 2 of SUT Port 2 + * Task Di - Generates Packets on Port 3 of Traffic Generator + and send to Port 3 of SUT Port 3 + * Task Dii - Generates Packets on Port 0 of Traffic Generator + and send to Port 0 of SUT Port 0 + + * Verifier Tasks - Composed of 1 or more tasks which receives + packets from SUT + + * Task E - Receives packets on Port 0 of Traffic Generator sent + from Port 0 of SUT Port 0 + * Task F - Receives packets on Port 1 of Traffic Generator sent + from Port 1 of SUT Port 1 + * Task G - Receives packets on Port 2 of Traffic Generator sent + from Port 2 of SUT Port 2 + * Task H - Receives packets on Port 3 of Traffic Generator sent + from Port 3 of SUT Port 3 + +* SUT + + * Receiver Tasks - Receives packets from generator - Composed of 1 or + more tasks which consume the packets sent from Traffic Generator + + * Task A - Receives Packets on Port 0 of System-Under-Test from + Traffic Generator Port 0, and forwards packets to Task E + * Task B - Receives Packets on Port 1 of System-Under-Test from + Traffic Generator Port 1, and forwards packets to Task E + * Task C - Receives Packets on Port 2 of System-Under-Test from + Traffic Generator Port 2, and forwards packets to Task E + * Task D - Receives Packets on Port 3 of System-Under-Test from + Traffic 
Generator Port 3, and forwards packets to Task E + + * Processing Tasks - Composed of multiple tasks in series which carry + out some processing on received packets before forwarding to the + task. + + * Task E - This receives packets from the Receiver Tasks, + carries out some operation on the data and forwards to result + packets to the next task in the sequence - Task F + * Task F - This receives packets from the previous Task - Task + E, carries out some operation on the data and forwards to result + packets to the next task in the sequence - Task G + * Task G - This receives packets from the previous Task - Task F + and distributes the result packages to the Transmitter tasks + + * Transmitter Tasks - Composed on 1 or more tasks which send the + processed packets back to the Traffic Generator + + * Task H - Receives Packets from Task G of System-Under-Test and + sends packets to Traffic Generator Port 0 + * Task I - Receives Packets from Task G of System-Under-Test and + sends packets to Traffic Generator Port 1 + * Task J - Receives Packets from Task G of System-Under-Test and + sends packets to Traffic Generator Port 2 + * Task K - Receives Packets From Task G of System-Under-Test and + sends packets to Traffic Generator Port 3 + +NSB Prox Test +============= + +A NSB Prox test is composed of the following components :- + +* Test Description File. Usually called + ``tc_prox_<context>_<test>-<ports>.yaml`` where + + * <context> is either ``baremetal`` or ``heat_context`` + * <test> is the a one or 2 word description of the test. + * <ports> is the number of ports used + + Example tests ``tc_prox_baremetal_l2fwd-2.yaml`` or + ``tc_prox_heat_context_vpe-4.yaml``. This file describes the components + of the test, in the case of openstack the network description and + server descriptions, in the case of baremetal the hardware + description location. 
It also contains the name of the Traffic Generator, the SUT config file + and the traffic profile description, all described below. See nsb-test-description-label_ + +* Traffic Profile file. Example ``prox_binsearch.yaml``. This describes the packet size, tolerated + loss, initial line rate to start traffic at, test interval etc See nsb-traffic-profile-label_ + +* Traffic Generator Config file. Usually called ``gen_<test>-<ports>.cfg``. + + This describes the activity of the traffic generator + + * What each core of the traffic generator does, + * The packet of data sent by a core on a port of the traffic generator + to the system under test + * What core is used to wait on what port for data from the system + under test. + + Example traffic generator config file ``gen_l2fwd-4.cfg`` + See nsb-traffic-generator-label_ + +* SUT Config file. Usually called ``handle_<test>-<ports>.cfg``. + + This describes the activity of the SUTs + + * What each core of the does, + * What cores receives packets from what ports + * What cores perform operations on the packets and pass the packets onto + another core + * What cores receives packets from what cores and transmit the packets on + the ports to the Traffic Verifier tasks of the Traffic Generator. + + Example traffic generator config file ``handle_l2fwd-4.cfg`` + See nsb-sut-generator-label_ + +* NSB PROX Baremetal Configuration file. Usually called + ``prox-baremetal-<ports>.yaml`` + + * <ports> is the number of ports used + + This is required for baremetal only. This describes hardware, NICs, + IP addresses, Network drivers, usernames and passwords. + See baremetal-config-label_ + +* Grafana Dashboard. Usually called + ``Prox_<context>_<test>-<port>-<DateAndTime>.json`` where + + * <context> Is either ``BM`` or ``heat`` + * <test> Is the a one or 2 word description of the test. + * <port> is the number of ports used express as ``2Port`` or ``4Port`` + * <DateAndTime> is the Date and Time expressed as a string. 
+ + Example grafana dashboard ``Prox_BM_L2FWD-4Port-1507804504588.json`` + +Other files may be required. These are test specific files and will be +covered later. + +.. _nsb-test-description-label: + +**Test Description File** + +Here we will discuss the test description for both +baremetal and openstack. + +*Test Description File for Baremetal* +------------------------------------- + +This section will introduce the meaning of the Test case description +file. We will use ``tc_prox_baremetal_l2fwd-2.yaml`` as an example to +show you how to understand the test description file. + +.. image:: images/PROX_Test_BM_Script.png + :width: 800px + :alt: NSB PROX Test Description File + +Now let's examine the components of the file in detail + +1. ``traffic_profile`` - This specifies the traffic profile for the + test. In this case ``prox_binsearch.yaml`` is used. See nsb-traffic-profile-label_ + +2. ``topology`` - This is either ``prox-tg-topology-1.yaml`` or + ``prox-tg-topology-2.yaml`` or ``prox-tg-topology-4.yaml`` + depending on number of ports required. + +3. ``nodes`` - This names the Traffic Generator and the System + under Test. Does not need to change. + +4. ``prox_path`` - Location of the Prox executable on the traffic + generator (Either baremetal or Openstack Virtual Machine) + +5. ``prox_config`` - This is the ``SUT Config File``. + In this case it is ``handle_l2fwd-2.cfg`` + + A number of additional parameters can be added. This example + is taken from VPE:: + + options: + vnf__0: + prox_path: /opt/nsb_bin/prox + prox_config: ``configs/handle_vpe-4.cfg`` + prox_args: + ``-t``: ```` + prox_files: + ``configs/vpe_ipv4.lua`` : ```` + ``configs/vpe_dscp.lua`` : ```` + ``configs/vpe_cpe_table.lua`` : ```` + ``configs/vpe_user_table.lua`` : ```` + ``configs/vpe_rules.lua`` : ```` + prox_generate_parameter: True + + ``prox_files`` - this specified that a number of addition files + need to be provided for the test to run correctly. 
This files + could provide routing information,hashing information or a + hashing algorithm and ip/mac information. + + ``prox_generate_parameter`` - this specifies that the NSB application + is required to provide information to the nsb Prox in the form + of a file called ``parameters.lua``, which contains information + retrieved from either the hardware or the openstack configuration. + +6. ``prox_args`` - this specifies the command line arguments to start + prox. See `prox command line`_. + +7. ``prox_config`` - This specifies the Traffic Generator config file. + +8. ``runner`` - This is set to ``Duration`` - This specified that the + test run for a set duration. Other runner types are available + but it is recommend to use ``Duration`` + +9. ``context`` - This is ``context`` for a 2 port Baremetal configuration. + If a 4 port configuration was required then file + ``prox-baremetal-4.yaml`` would be used. This is the NSB Prox + baremetal configuration file. + +.. _nsb-traffic-profile-label: + +*Traffic Profile file* +---------------------- + +This describes the details of the traffic flow. In this case ``prox_binsearch.yaml`` is used. + +.. image:: images/PROX_Traffic_profile.png + :width: 800px + :alt: NSB PROX Traffic Profile + + +1. ``name`` - The name of the traffic profile. This name should match the name specified in the + ``traffic_profile`` field in the Test Description File. + +2. ``traffic_type`` - This specifies the type of traffic pattern generated, This name matches + class name of the traffic generator See:: + + network_services/traffic_profile/prox_binsearch.py class ProxBinSearchProfile(ProxProfile) + + In this case it lowers the traffic rate until the number of packets + sent is equal to the number of packets received (plus a + tolerated loss). Once it achieves this it increases the traffic + rate in order to find the highest rate with no traffic loss. + + Custom traffic types can be created by creating a new traffic profile class. + +3. 
``tolerated_loss`` - This specifies the percentage of packets that can be lost/dropped before + we declare success or failure. Success is Transmitted-Packets from Traffic Generator is greater than or equal to + packets received by Traffic Generator plus tolerated loss. + +4. ``test_precision`` - This specifies the precision of the test results. For some tests the success criteria + may never be achieved because the test precision may be greater than the successful throughput. For finer + results increase the precision by making this value smaller. + +5. ``packet_sizes`` - This specifies the range of packets size this test is run for. + +6. ``duration`` - This specifies the sample duration that the test uses to check for success or failure. + +7. ``lower_bound`` - This specifies the test initial lower bound sample rate. On success this value is increased. + +8. ``upper_bound`` - This specifies the test initial upper bound sample rate. On success this value is decreased. + +Other traffic profiles exist eg prox_ACL.yaml which does not +compare what is received with what is transmitted. It just +sends packet at max rate. + +It is possible to create custom traffic profiles with by +creating new file in the same folder as prox_binsearch.yaml. +See this prox_vpe.yaml as example:: + + schema: ``nsb:traffic_profile:0.1`` + + name: prox_vpe + description: Prox vPE traffic profile + + traffic_profile: + traffic_type: ProxBinSearchProfile + tolerated_loss: 100.0 #0.001 + test_precision: 0.01 + # The minimum size of the Ethernet frame for the vPE test is 68 bytes. + packet_sizes: [68] + duration: 5 + lower_bound: 0.0 + upper_bound: 100.0 + +*Test Description File for Openstack* +------------------------------------- + +We will use ``tc_prox_heat_context_l2fwd-2.yaml`` as a example to show +you how to understand the test description file. + +.. 
image:: images/PROX_Test_HEAT_Script.png + :width: 800px + :alt: NSB PROX Test Description File + +Now lets examine the components of the file in detail + +Sections 1 to 8 are exactly the same in Baremetal and in Heat. Section +``9`` is replaced with sections A to F. Section 9 was for a baremetal +configuration file. This has no place in a heat configuration. + +A. ``image`` - yardstick-samplevnfs. This is the name of the image + created during the installation of NSB. This is fixed. + +B. ``flavor`` - The flavor is created dynamically. However we could + use an already existing flavor if required. In that case the + flavor would be named:: + + flavor: yardstick-flavor + +C. ``extra_specs`` - This allows us to specify the number of + cores sockets and hyperthreading assigned to it. In this case + we have 1 socket with 10 codes and no hyperthreading enabled. + +D. ``placement_groups`` - default. Do not change for NSB PROX. + +E. ``servers`` - ``tg_0`` is the traffic generator and ``vnf_0`` + is the system under test. + +F. ``networks`` - is composed of a management network labeled ``mgmt`` + and one uplink network labeled ``uplink_0`` and one downlink + network labeled ``downlink_0`` for 2 ports. If this was a 4 port + configuration there would be 2 extra downlink ports. See this + example from a 4 port l2fwd test.:: + + networks: + mgmt: + cidr: '10.0.1.0/24' + uplink_0: + cidr: '10.0.2.0/24' + gateway_ip: 'null' + port_security_enabled: False + enable_dhcp: 'false' + downlink_0: + cidr: '10.0.3.0/24' + gateway_ip: 'null' + port_security_enabled: False + enable_dhcp: 'false' + downlink_1: + cidr: '10.0.4.0/24' + gateway_ip: 'null' + port_security_enabled: False + enable_dhcp: 'false' + downlink_2: + cidr: '10.0.5.0/24' + gateway_ip: 'null' + port_security_enabled: False + enable_dhcp: 'false' + +.. _nsb-traffic-generator-label: + +*Traffic Generator Config file* +------------------------------- + +This section will describe the traffic generator config file. 
+This is the same for both baremetal and heat. See this example +of ``gen_l2fwd_multiflow-2.cfg`` to explain the options. + +.. image:: images/PROX_Gen_2port_cfg.png + :width: 1400px + :alt: NSB PROX Gen Config File + +The configuration file is divided into multiple sections, each +of which is used to define some parameters and options.:: + + [eal options] + [variables] + [port 0] + [port 1] + [port .] + [port Z] + [defaults] + [global] + [core 0] + [core 1] + [core 2] + [core .] + [core Z] + +See `prox options`_ for details + +Now let's examine the components of the file in detail + +1. ``[eal options]`` - This specified the EAL (Environmental + Abstraction Layer) options. These are default values and + are not changed. See `dpdk wiki page`_. + +2. ``[variables]`` - This section contains variables, as + the name suggests. Variables for Core numbers, mac + addresses, ip addresses etc. They are assigned as a + ``key = value`` where the key is used in place of the value. + + .. caution:: + A special case for valuables with a value beginning with + ``@@``. These values are dynamically updated by the NSB + application at run time. Values like MAC address, + IP Address etc. + +3. ``[port 0]`` - This section describes the DPDK Port. The number + following the keyword ``port`` usually refers to the DPDK Port + Id. usually starting from ``0``. Because you can have multiple + ports this entry usually repeated. Eg. For a 2 port setup + ``[port0]`` and ``[port 1]`` and for a 4 port setup ``[port 0]``, + ``[port 1]``, ``[port 2]`` and ``[port 3]``:: + + [port 0] + name=p0 + mac=hardware + rx desc=2048 + tx desc=2048 + promiscuous=yes + + a. In this example ``name = p0`` assigned the name ``p0`` to the + port. Any name can be assigned to a port. + b. ``mac=hardware`` sets the MAC address assigned by the hardware + to data from this port. + c. ``rx desc=2048`` sets the number of available descriptors to + allocate for receive packets. 
This can be changed and can + effect performance. + d. ``tx desc=2048`` sets the number of available descriptors to + allocate for transmit packets. This can be changed and can + effect performance. + e. ``promiscuous=yes`` this enables promiscuous mode for this port. + +4. ``[defaults]`` - Here default operations and settings can be over + written. In this example ``mempool size=4K`` the number of mbufs + per task is altered. Altering this value could effect + performance. See `prox options`_ for details. + +5. ``[global]`` - Here application wide setting are supported. Things + like application name, start time, duration and memory + configurations can be set here. In this example.:: + + [global] + start time=5 + name=Basic Gen + + a. ``start time=5`` Time is seconds after which average + stats will be started. + b. ``name=Basic Gen`` Name of the configuration. + +6. ``[core 0]`` - This core is designated the master core. Every + Prox application must have a master core. The master mode must + be assigned to exactly one task, running alone on one core.:: + + [core 0] + mode=master + +7. ``[core 1]`` - This describes the activity on core 1. Cores can + be configured by means of a set of [core #] sections, where + # represents either: + + a. an absolute core number: e.g. on a 10-core, dual socket + system with hyper-threading, + cores are numbered from 0 to 39. + + b. PROX allows a core to be identified by a core number, the + letter 's', and a socket number. + + It is possible to write a baremetal and an openstack test which use + the same traffic generator config file and SUT config file. + In this case it is advisable not to use physical + core numbering. + + However it is also possible to write NSB Prox tests that + have been optimized for a particular hardware configuration. + In this case it is advisable to use the core numbering. + It is up to the user to make sure that cores from + the right sockets are used (i.e. 
from the socket on which the NIC + is attached to), to ensure good performance (EPA). + + Each core can be assigned with a set of tasks, each running + one of the implemented packet processing modes.:: + + [core 1] + name=p0 + task=0 + mode=gen + tx port=p0 + bps=1250000000 + ; Ethernet + IP + UDP + pkt inline=${sut_mac0} 70 00 00 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d 98 10 64 01 98 10 64 02 13 88 13 88 00 08 55 7b + ; src_ip: 152.16.100.0/8 + random=0000XXX1 + rand_offset=29 + ; dst_ip: 152.16.100.0/8 + random=0000XXX0 + rand_offset=33 + random=0001001110001XXX0001001110001XXX + rand_offset=34 + + a. ``name=p0`` - Name assigned to the core. + b. ``task=0`` - Each core can run a set of tasks. Starting with ``0``. + Task 1 can be defined later in this core or + can be defined in another ``[core 1]`` section with ``task=1`` + later in configuration file. Sometimes running + multiple task related to the same packet on the same physical + core improves performance, however sometimes it + is optimal to move task to a separate core. This is best + decided by checking performance. + c. ``mode=gen`` - Specifies the action carried out by this task on + this core. Supported modes are: classify, drop, gen, lat, genl4, nop, l2fwd, gredecap, + greencap, lbpos, lbnetwork, lbqinq, lb5tuple, ipv6_decap, ipv6_encap, + qinqdecapv4, qinqencapv4, qos, routing, impair, + mirror, unmpls, tagmpls, nat, decapnsh, encapnsh, police, acl + Which are :- + + * Classify + * Drop + * Basic Forwarding (no touch) + * L2 Forwarding (change MAC) + * GRE encap/decap + * Load balance based on packet fields + * Symmetric load balancing + * QinQ encap/decap IPv4/IPv6 + * ARP + * QoS + * Routing + * Unmpls + * Nsh encap/decap + * Policing + * ACL + + In the traffic generator we expect a core to generate packets (``gen``) + and to receive packets & calculate latency (``lat``) + This core does ``gen`` . ie it is a traffic generator. 
+ + To understand what each of the modes support please see + `prox documentation`_. + + d. ``tx port=p0`` - This specifies that the packets generated are + transmitted to port ``p0`` + e. ``bps=1250000000`` - This indicates Bytes Per Second to + generate packets. + f. ``; Ethernet + IP + UDP`` - This is a comment. Items starting with + ``;`` are ignored. + g. ``pkt inline=${sut_mac0} 70 00 00 00 ...`` - Defines the packet + format as a sequence of bytes (each + expressed in hexadecimal notation). This defines the packet + that is generated. This packets begins + with the hexadecimal sequence assigned to ``sut_mac`` and the + remainder of the bytes in the string. + This packet could now be sent or modified by ``random=..`` + described below before being sent to target. + h. ``; src_ip: 152.16.100.0/8`` - Comment + i. ``random=0000XXX1`` - This describes a field of the packet + containing random data. This string can be + 8,16,24 or 32 character long and represents 1,2,3 or 4 + bytes of data. In this case it describes a byte of + data. Each character in string can be 0,1 or ``X``. 0 or 1 + are fixed bit values in the data packet and ``X`` is a + random bit. So random=0000XXX1 generates 00000001(1), + 00000011(3), 00000101(5), 00000111(7), + 00001001(9), 00001011(11), 00001101(13) and 00001111(15) + combinations. + j. ``rand_offset=29`` - Defines where to place the previously + defined random field. + k. ``; dst_ip: 152.16.100.0/8`` - Comment + l. ``random=0000XXX0`` - This is another random field which + generates a byte of 00000000(0), 00000010(2), + 00000100(4), 00000110(6), 00001000(8), 00001010(10), + 00001100(12) and 00001110(14) combinations. + m. ``rand_offset=33`` - Defines where to place the previously + defined random field. + n. ``random=0001001110001XXX0001001110001XXX`` - This is + another random field which generates 4 bytes. + o. ``rand_offset=34`` - Defines where to place the previously + defined 4 byte random field. 
+ + Core 2 executes same scenario as Core 1. The only difference + in this case is that the packets are generated + for Port 1. + +8. ``[core 3]`` - This defines the activities on core 3. The purpose + of ``core 3`` and ``core 4`` is to receive packets + sent by the SUT.:: + + [core 3] + name=rec 0 + task=0 + mode=lat + rx port=p0 + lat pos=42 + + a. ``name=rec 0`` - Name assigned to the core. + b. ``task=0`` - Each core can run a set of tasks. Starting with + ``0``. Task 1 can be defined later in this core or + can be defined in another ``[core 1]`` section with + ``task=1`` later in configuration file. Sometimes running + multiple task related to the same packet on the same + physical core improves performance, however sometimes it + is optimal to move task to a separate core. This is + best decided by checking performance. + c. ``mode=lat`` - Specifies the action carried out by this task on this core. Supported modes are: acl, + classify, drop, gredecap, greencap, ipv6_decap, ipv6_encap, l2fwd, lbnetwork, lbpos, lbqinq, nop, + police, qinqdecapv4, qinqencapv4, qos, routing, impair, lb5tuple, mirror, unmpls, tagmpls, + nat, decapnsh, encapnsh, gen, genl4 and lat. This task(0) per core(3) receives packets on port. + d. ``rx port=p0`` - The port to receive packets on ``Port 0``. Core 4 will receive packets on ``Port 1``. + e. ``lat pos=42`` - Describes where to put a 4-byte timestamp in the packet. Note that the packet length should + be longer than ``lat pos`` + 4 bytes to avoid truncation of the timestamp. It defines where the timestamp is + to be read from. Note that the SUT workload might cause the position of the timestamp to change + (i.e. due to encapsulation). + +.. _nsb-sut-generator-label: + +*SUT Config file* +------------------------------- + +This section will describes the SUT(VNF) config file. This is the same for both +baremetal and heat. See this example of ``handle_l2fwd_multiflow-2.cfg`` to explain the options. + +.. 
image:: images/PROX_Handle_2port_cfg.png + :width: 1400px + :alt: NSB PROX Handle Config File + +See `prox options`_ for details + +Now let's examine the components of the file in detail + +1. ``[eal options]`` - same as the Generator config file. This specified the EAL (Environmental Abstraction Layer) + options. These are default values and are not changed. + See `dpdk wiki page`_. + +2. ``[port 0]`` - This section describes the DPDK Port. The number following the keyword ``port`` usually refers to the DPDK Port Id. usually starting from ``0``. + Because you can have multiple ports this entry usually repeated. Eg. For a 2 port setup ``[port0]`` and ``[port 1]`` and for a 4 port setup ``[port 0]``, ``[port 1]``, + ``[port 2]`` and ``[port 3]``:: + + [port 0] + name=if0 + mac=hardware + rx desc=2048 + tx desc=2048 + promiscuous=yes + + a. In this example ``name =if0`` assigned the name ``if0`` to the port. Any name can be assigned to a port. + b. ``mac=hardware`` sets the MAC address assigned by the hardware to data from this port. + c. ``rx desc=2048`` sets the number of available descriptors to allocate for receive packets. This can be changed and can effect performance. + d. ``tx desc=2048`` sets the number of available descriptors to allocate for transmit packets. This can be changed and can effect performance. + e. ``promiscuous=yes`` this enables promiscuous mode for this port. + +3. ``[defaults]`` - Here default operations and settings can be over written.:: + + [defaults] + mempool size=8K + memcache size=512 + + a. In this example ``mempool size=8K`` the number of mbufs per task is altered. Altering this value could effect performance. See `prox options`_ for details. + b. ``memcache size=512`` - number of mbufs cached per core, default is 256 this is the cache_size. Altering this value could effect performance. + +4. ``[global]`` - Here application wide setting are supported. 
Things like application name, start time, duration and memory configurations can be set here. + In this example.:: + + [global] + start time=5 + name=Basic Gen + + a. ``start time=5`` Time is seconds after which average stats will be started. + b. ``name=Handle L2FWD Multiflow (2x)`` Name of the configuration. + +5. ``[core 0]`` - This core is designated the master core. Every Prox application must have a master core. The master mode must be assigned to + exactly one task, running alone on one core.:: + + [core 0] + mode=master + +6. ``[core 1]`` - This describes the activity on core 1. Cores can be configured by means of a set of [core #] sections, where # represents either: + + a. an absolute core number: e.g. on a 10-core, dual socket system with hyper-threading, + cores are numbered from 0 to 39. + + b. PROX allows a core to be identified by a core number, the letter 's', and a socket number. + However NSB PROX is hardware agnostic (physical and virtual configurations are the same) it + is advisable no to use physical core numbering. + + Each core can be assigned with a set of tasks, each running one of the implemented packet processing modes.:: + + [core 1] + name=none + task=0 + mode=l2fwd + dst mac=@@tester_mac1 + rx port=if0 + tx port=if1 + + a. ``name=none`` - No name assigned to the core. + b. ``task=0`` - Each core can run a set of tasks. Starting with ``0``. Task 1 can be defined later in this core or + can be defined in another ``[core 1]`` section with ``task=1`` later in configuration file. Sometimes running + multiple task related to the same packet on the same physical core improves performance, however sometimes it + is optimal to move task to a separate core. This is best decided by checking performance. + c. ``mode=l2fwd`` - Specifies the action carried out by this task on this core. 
Supported modes are: acl, + classify, drop, gredecap, greencap, ipv6_decap, ipv6_encap, l2fwd, lbnetwork, lbpos, lbqinq, nop, + police, qinqdecapv4, qinqencapv4, qos, routing, impair, lb5tuple, mirror, unmpls, tagmpls, + nat, decapnsh, encapnsh, gen, genl4 and lat. This code does ``l2fwd`` .. ie it does the L2FWD. + + d. ``dst mac=@@tester_mac1`` - The destination mac address of the packet will be set to the MAC address of ``Port 1`` of destination device. (The Traffic Generator/Verifier) + e. ``rx port=if0`` - This specifies that the packets are received from ``Port 0`` called if0 + f. ``tx port=if1`` - This specifies that the packets are transmitted to ``Port 1`` called if1 + + If this example we receive a packet on core on a port, carry out operation on the packet on the core and transmit it on on another port still using the same task on the same core. + + On some implementation you may wish to use multiple tasks, like this.:: + + [core 1] + name=rx_task + task=0 + mode=l2fwd + dst mac=@@tester_p0 + rx port=if0 + tx cores=1t1 + drop=no + + name=l2fwd_if0 + task=1 + mode=nop + rx ring=yes + tx port=if0 + drop=no + + In this example you can see Core 1/Task 0 called ``rx_task`` receives the packet from if0 and perform the l2fwd. However instead of sending the packet to a + port it sends it to a core see ``tx cores=1t1``. In this case it sends it to Core 1/Task 1. + + Core 1/Task 1 called ``l2fwd_if0``, receives the packet, not from a port but from the ring. See ``rx ring=yes``. It does not perform any operation on the packet See ``mode=none`` + and sends the packets to ``if0`` see ``tx port=if0``. + + It is also possible to implement more complex operations be chaining multiple operations in sequence and using rings to pass packets from one core to another. + + In thus example we show a Broadband Network Gateway (BNG) with Quality of Service (QoS). Communication from task to task is via rings. + + .. 
image:: images/PROX_BNG_QOS.png + :width: 1000px + :alt: NSB PROX Config File for BNG_QOS + +*Baremetal Configuration file* +------------------------------ + +.. _baremetal-config-label: + +This is required for baremetal testing. It describes the IP address of the various ports, the Network devices drivers and MAC addresses and the network +configuration. + +In this example we will describe a 2 port configuration. This file is the same for all 2 port NSB Prox tests on the same platforms/configuration. + + .. image:: images/PROX_Baremetal_config.png + :width: 1000px + :alt: NSB PROX Yardstick Config + +Now lets describe the sections of the file. + + 1. ``TrafficGen`` - This section describes the Traffic Generator node of the test configuration. The name of the node ``trafficgen_1`` must match the node name + in the ``Test Description File for Baremetal`` mentioned earlier. The password attribute of the test needs to be configured. All other parameters + can remain as default settings. + 2. ``interfaces`` - This defines the DPDK interfaces on the Traffic Generator. + 3. ``xe0`` is DPDK Port 0. ``lspci`` and `` ./dpdk-devbind.py -s`` can be used to provide the interface information. ``netmask`` and ``local_ip`` should not be changed + 4. ``xe1`` is DPDK Port 1. If more than 2 ports are required then ``xe1`` section needs to be repeated and modified accordingly. + 5. ``vnf`` - This section describes the SUT of the test configuration. The name of the node ``vnf`` must match the node name in the + ``Test Description File for Baremetal`` mentioned earlier. The password attribute of the test needs to be configured. All other parameters + can remain as default settings + 6. ``interfaces`` - This defines the DPDK interfaces on the SUT + 7. ``xe0`` - Same as 3 but for the ``SUT``. + 8. ``xe1`` - Same as 4 but for the ``SUT`` also. + 9. ``routing_table`` - All parameters should remain unchanged. + 10. ``nd_route_tbl`` - All parameters should remain unchanged. 
+ +*Grafana Dashboard* +------------------- + +The grafana dashboard visually displays the results of the tests. The steps required to produce a grafana dashboard are described here. + +.. _yardstick-config-label: + + a. Configure ``yardstick`` to use influxDB to store test results. See file ``/etc/yardstick/yardstick.conf``. + + .. image:: images/PROX_Yardstick_config.png + :width: 1000px + :alt: NSB PROX Yardstick Config + + 1. Specify the dispatcher to use influxDB to store results. + 2. "target = .. " - Specify location of influxDB to store results. + "db_name = yardstick" - name of database. Do not change + "username = root" - username to use to store result. (Many tests are run as root) + "password = ... " - Please set to root user password + + b. Deploy InfludDB & Grafana. See how to Deploy InfluxDB & Grafana. See `grafana deployment`_. + c. Generate the test data. Run the tests as follows .:: + + yardstick --debug task start tc_prox_<context>_<test>-ports.yaml + + eg.:: + + yardstick --debug task start tc_prox_heat_context_l2fwd-4.yaml + + d. Now build the dashboard for the test you just ran. The easiest way to do this is to copy an existing dashboard and rename the + test and the field names. The procedure to do so is described here. See `opnfv grafana dashboard`_. + +How to run NSB Prox Test on an baremetal environment +==================================================== + +In order to run the NSB PROX test. + + 1. Install NSB on Traffic Generator node and Prox in SUT. See `NSB Installation`_ + + 2. To enter container:: + + docker exec -it yardstick /bin/bash + + 3. Install baremetal configuration file (POD files) + + a. Go to location of PROX tests in container :: + + cd /home/opnfv/repos/yardstick/samples/vnf_samples/nsut/prox + + b. Install prox-baremetal-2.yam and prox-baremetal-4.yaml for that topology + into this directory as per baremetal-config-label_ + + c. 
Install and configure ``yardstick.conf`` :: + + cd /etc/yardstick/ + + Modify /etc/yardstick/yardstick.conf as per yardstick-config-label_ + + 4. Execute the test. Eg.:: + + yardstick --debug task start ./tc_prox_baremetal_l2fwd-4.yaml + +How to run NSB Prox Test on an Openstack environment +==================================================== + +In order to run the NSB PROX test. + + 1. Install NSB on Openstack deployment node. See `NSB Installation`_ + + 2. To enter container:: + + docker exec -it yardstick /bin/bash + + 3. Install configuration file + + a. Goto location of PROX tests in container :: + + cd /home/opnfv/repos/yardstick/samples/vnf_samples/nsut/prox + + b. Install and configure ``yardstick.conf`` :: + + cd /etc/yardstick/ + + Modify /etc/yardstick/yardstick.conf as per yardstick-config-label_ + + + 4. Execute the test. Eg.:: + + yardstick --debug task start ./tc_prox_heat_context_l2fwd-4.yaml + +Frequently Asked Questions +========================== + +Here is a list of frequently asked questions. + +*NSB Prox does not work on Baremetal, How do I resolve this?* +------------------------------------------------------------- + +If PROX NSB does not work on baremetal, problem is either in network configuration or test file. + +*Solution* + +1. Verify network configuration. Execute existing baremetal test.:: + + yardstick --debug task start ./tc_prox_baremetal_l2fwd-4.yaml + + If test does not work then error in network configuration. + + a. Check DPDK on Traffic Generator and SUT via:- :: + + /root/dpdk-17./usertools/dpdk-devbind.py + + b. Verify MAC addresses match ``prox-baremetal-<ports>.yaml`` via ``ifconfig`` and ``dpdk-devbind`` + + c. Check your eth port is what you expect. You would not be the first person to think that + the port your cable is plugged into is ethX when in fact it is ethY. Use + ethtool to visually confirm that the eth is where you expect.:: + + ethtool -p ethX + + A led should start blinking on port. 
(On both System-Under-Test and Traffic Generator) + + d. Check cable. + + Install Linux kernel network driver and ensure your ports are + ``bound`` to the driver via ``dpdk-devbind``. Bring up port on both + SUT and Traffic Generator and check connection. + + i) On SUT and on Traffic Generator:: + + ifconfig ethX/enoX up + + ii) Check link + + ethtool ethX/enoX + + See ``Link detected`` if ``yes`` .... Cable is good. If ``no`` you have an issue with your cable/port. + +2. If existing baremetal works then issue is with your test. Check the traffic generator gen_<test>-<ports>.cfg to ensure + it is producing a valid packet. + +*How do I debug NSB Prox on Baremetal?* +--------------------------------------- + +*Solution* + +1. Execute the test as follows:: + + yardstick --debug task start ./tc_prox_baremetal_l2fwd-4.yaml + +2. Login to Traffic Generator as ``root``.:: + + cd + /opt/nsb_bin/prox -f /tmp/gen_<test>-<ports>.cfg + +3. Login to SUT as ``root``.:: + + cd + /opt/nsb_bin/prox -f /tmp/handle_<test>-<ports>.cfg + +4. Now let's examine the Generator Output. In this case the output of gen_l2fwd-4.cfg. + + .. image:: images/PROX_Gen_GUI.png + :width: 1000px + :alt: NSB PROX Traffic Generator GUI + + Now let's examine the output + + 1. Indicates the amount of data successfully transmitted on Port 0 + 2. Indicates the amount of data successfully received on port 1 + 3. Indicates the amount of data successfully handled for port 1 + + It appears what is transmitted is received. + + .. Caution:: + The number of packets MAY not exactly match because the ports are read in sequence. + + .. Caution:: + What is transmitted on PORT X may not always be received on same port. Please check the Test scenario. + +5. Now lets examine the SUT Output + + .. image:: images/PROX_SUT_GUI.png + :width: 1400px + :alt: NSB PROX SUT GUI + + Now lets examine the output + + 1. 
What is received on 0 is transmitted on 1, received on 1 transmitted on 0, + received on 2 transmitted on 3 and received on 3 transmitted on 2. + 2. No packets are Failed. + 3. No Packets are discarded. + + We can also dump the packets being received or transmitted via the following commands. :: + + dump Arguments: <core id> <task id> <nb packets> + Create a hex dump of <nb_packets> from <task_id> on <core_id> showing how + packets have changed between RX and TX. + dump_rx Arguments: <core id> <task id> <nb packets> + Create a hex dump of <nb_packets> from <task_id> on <core_id> at RX + dump_tx Arguments: <core id> <task id> <nb packets> + Create a hex dump of <nb_packets> from <task_id> on <core_id> at TX + + eg.:: + + dump_tx 1 0 1 + +*NSB Prox works on Baremetal but not in Openstack. How do I resolve this?* +-------------------------------------------------------------------------- + +NSB Prox on Baremetal is a lot more forgiving than NSB Prox on Openstack. A badly +formed packed may still work with PROX on Baremetal. However on +Openstack the packet must be correct and all fields of the header correct. +Eg A packet with an invalid Protocol ID would still work in Baremetal +but this packet would be rejected by openstack. + +*Solution* + + 1. Check the validity of the packet. + 2. Use a known good packet in your test + 3. If using ``Random`` fields in the traffic generator, disable them and retry. + + +*How do I debug NSB Prox on Openstack?* +--------------------------------------- + +*Solution* + +1. Execute the test as follows:: + + yardstick --debug task start --keep-deploy ./tc_prox_heat_context_l2fwd-4.yaml + +2. Access docker image if required via:: + + docker exec -it yardstick /bin/bash + +3. Install openstack credentials. + + Depending on your openstack deployment, the location of these credentials may vary. + On this platform I do this via:: + + scp root@10.237.222.55:/etc/kolla/admin-openrc.sh . + source ./admin-openrc.sh + +4. 
List Stack details + + a. Get the name of the Stack. + + .. image:: images/PROX_Openstack_stack_list.png + :width: 1000px + :alt: NSB PROX openstack stack list + + b. Get the Floating IP of the Traffic Generator & SUT + + This generates a lot of information. Please not the floating IP of the VNF and + the Traffic Generator. + + .. image:: images/PROX_Openstack_stack_show_a.png + :width: 1000px + :alt: NSB PROX openstack stack show (Top) + + From here you can see the floating IP Address of the SUT / VNF + + .. image:: images/PROX_Openstack_stack_show_b.png + :width: 1000px + :alt: NSB PROX openstack stack show (Top) + + From here you can see the floating IP Address of the Traffic Generator + + c. Get ssh identity file + + In the docker container locate the identity file.:: + + cd /home/opnfv/repos/yardstick/yardstick/resources/files + ls -lt + +5. Login to SUT as ``Ubuntu``.:: + + ssh -i ./yardstick_key-01029d1d ubuntu@172.16.2.158 + + Change to root:: + + sudo su + + Now continue as baremetal. + +6. Login to SUT as ``Ubuntu``.:: + + ssh -i ./yardstick_key-01029d1d ubuntu@172.16.2.156 + + Change to root:: + + sudo su + + Now continue as baremetal. + +*How do I resolve "Quota exceeded for resources"* +------------------------------------------------- + +*Solution* + +This usually occurs due to 2 reasons when executing an openstack test. + +1. One or more stacks already exists and are consuming all resources. 
To resolve :: + + openstack stack list + + Response:: + + +--------------------------------------+--------------------+-----------------+----------------------+--------------+ + | ID | Stack Name | Stack Status | Creation Time | Updated Time | + +--------------------------------------+--------------------+-----------------+----------------------+--------------+ + | acb559d7-f575-4266-a2d4-67290b556f15 | yardstick-e05ba5a4 | CREATE_COMPLETE | 2017-12-06T15:00:05Z | None | + | 7edf21ce-8824-4c86-8edb-f7e23801a01b | yardstick-08bda9e3 | CREATE_COMPLETE | 2017-12-06T14:56:43Z | None | + +--------------------------------------+--------------------+-----------------+----------------------+--------------+ + + In this case 2 stacks already exist. + + To remove stack:: + + openstack stack delete yardstick-08bda9e3 + Are you sure you want to delete this stack(s) [y/N]? y + +2. The openstack configuration quotas are too small. + + The solution is to increase the quota. Use below to query existing quotas:: + + openstack quota show + + And to set quota:: + + openstack quota set <resource> + +*Openstack Cli fails or hangs. How do I resolve this?* +------------------------------------------------------ + +*Solution* + +If it fails due to :: + + Missing value auth-url required for auth plugin password + +Check your shell environment for Openstack variables. One of them should contain the authentication URL :: + + + OS_AUTH_URL=``https://192.168.72.41:5000/v3`` + +Or similar. Ensure that openstack configurations are exported. :: + + cat /etc/kolla/admin-openrc.sh + +Result :: + + export OS_PROJECT_DOMAIN_NAME=default + export OS_USER_DOMAIN_NAME=default + export OS_PROJECT_NAME=admin + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=BwwSEZqmUJA676klr9wa052PFjNkz99tOccS9sTc + export OS_AUTH_URL=http://193.168.72.41:35357/v3 + export OS_INTERFACE=internal + export OS_IDENTITY_API_VERSION=3 + export EXTERNAL_NETWORK=yardstick-public + +and visible. 
+ +If the Openstack Cli appears to hang, then verify the proxys and no_proxy are set correctly. +They should be similar to :: + + FTP_PROXY="http://proxy.ir.intel.com:911/" + HTTPS_PROXY="http://proxy.ir.intel.com:911/" + HTTP_PROXY="http://proxy.ir.intel.com:911/" + NO_PROXY="localhost,127.0.0.1,10.237.222.55,10.237.223.80,10.237.222.134,.ir.intel.com" + ftp_proxy="http://proxy.ir.intel.com:911/" + http_proxy="http://proxy.ir.intel.com:911/" + https_proxy="http://proxy.ir.intel.com:911/" + no_proxy="localhost,127.0.0.1,10.237.222.55,10.237.223.80,10.237.222.134,.ir.intel.com" + +Where + + 1) 10.237.222.55 = IP Address of deployment node + 2) 10.237.223.80 = IP Address of Controller node + 3) 10.237.222.134 = IP Address of Compute Node + 4) ir.intel.com = local no proxy + + + + + + diff --git a/docs/testing/developer/devguide/images/PROX_BNG_QOS.png b/docs/testing/developer/devguide/images/PROX_BNG_QOS.png Binary files differnew file mode 100644 index 000000000..3c720945c --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_BNG_QOS.png diff --git a/docs/testing/developer/devguide/images/PROX_Baremetal_config.png b/docs/testing/developer/devguide/images/PROX_Baremetal_config.png Binary files differnew file mode 100644 index 000000000..5cd914035 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Baremetal_config.png diff --git a/docs/testing/developer/devguide/images/PROX_Gen_2port_cfg.png b/docs/testing/developer/devguide/images/PROX_Gen_2port_cfg.png Binary files differnew file mode 100644 index 000000000..07731cabc --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Gen_2port_cfg.png diff --git a/docs/testing/developer/devguide/images/PROX_Gen_GUI.png b/docs/testing/developer/devguide/images/PROX_Gen_GUI.png Binary files differnew file mode 100644 index 000000000..e96aea3de --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Gen_GUI.png diff --git a/docs/testing/developer/devguide/images/PROX_Handle_2port_cfg.png 
b/docs/testing/developer/devguide/images/PROX_Handle_2port_cfg.png Binary files differnew file mode 100644 index 000000000..6505bedfd --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Handle_2port_cfg.png diff --git a/docs/testing/developer/devguide/images/PROX_Hardware_Arch.png b/docs/testing/developer/devguide/images/PROX_Hardware_Arch.png Binary files differnew file mode 100644 index 000000000..6e69dd6e3 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Hardware_Arch.png diff --git a/docs/testing/developer/devguide/images/PROX_Openstack_stack_list.png b/docs/testing/developer/devguide/images/PROX_Openstack_stack_list.png Binary files differnew file mode 100644 index 000000000..f67d10e6d --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Openstack_stack_list.png diff --git a/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_a.png b/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_a.png Binary files differnew file mode 100644 index 000000000..00e7620e7 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_a.png diff --git a/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_b.png b/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_b.png Binary files differnew file mode 100644 index 000000000..bbe9b8631 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_b.png diff --git a/docs/testing/developer/devguide/images/PROX_SUT_GUI.png b/docs/testing/developer/devguide/images/PROX_SUT_GUI.png Binary files differnew file mode 100644 index 000000000..204083d1d --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_SUT_GUI.png diff --git a/docs/testing/developer/devguide/images/PROX_Software_Arch.png b/docs/testing/developer/devguide/images/PROX_Software_Arch.png Binary files differnew file mode 100644 index 000000000..c31f1e24a --- /dev/null +++ 
b/docs/testing/developer/devguide/images/PROX_Software_Arch.png diff --git a/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png b/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png Binary files differnew file mode 100644 index 000000000..32530eb15 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png diff --git a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png Binary files differnew file mode 100644 index 000000000..754973b4e --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png diff --git a/docs/testing/developer/devguide/images/PROX_Traffic_profile.png b/docs/testing/developer/devguide/images/PROX_Traffic_profile.png Binary files differnew file mode 100644 index 000000000..660bca342 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Traffic_profile.png diff --git a/docs/testing/developer/devguide/images/PROX_Yardstick_config.png b/docs/testing/developer/devguide/images/PROX_Yardstick_config.png Binary files differnew file mode 100644 index 000000000..8d346b03a --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Yardstick_config.png diff --git a/nsb_setup.sh b/nsb_setup.sh index 4a8e4db93..50fc017d1 100755 --- a/nsb_setup.sh +++ b/nsb_setup.sh @@ -63,7 +63,7 @@ for i in "${pkg[@]}"; do fi done -pip install ansible==2.3.2 shade==1.17.0 docker-py==1.10.6 +pip install ansible==2.4.2 shade==1.22.2 docker-py==1.10.6 ANSIBLE_SCRIPTS="ansible" diff --git a/samples/storage_bottlenecks.yaml b/samples/storage_bottlenecks.yaml new file mode 100644 index 000000000..1aa0d7e35 --- /dev/null +++ b/samples/storage_bottlenecks.yaml @@ -0,0 +1,77 @@ +############################################################################## +# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +# Sample benchmark task config file +# measure storage performance using fio +# +# For this sample just like running the command below on the test vm and +# getting benchmark info back to the yardstick. +# +# sudo fio -filename=/home/ubuntu/data.raw -bs=4k -ipdepth=1 -rw=rw \ +# -ramp_time=10 -runtime=60 -name=yardstick-fio -ioengine=libaio \ +# -direct=1 -group_reporting -numjobs=1 -time_based \ +# --output-format=json + +schema: "yardstick:task:0.1" +run_in_parallel: true + +{% set directory = directory or '/FIO_Test' %} +{% set stack_num = stack_num or 1 %} +{% set volume_num = volume_num or "1" %} +{% set rw = rw or "randrw" %} +{% set bs = bs or "4k" %} +{% set size = size or "30g" %} +{% set rwmixwrite = rwmixwrite or "50" %} +{% set numjobs = numjobs or "1" %} +{% set direct = direct or "1" %} +{% set volume_size = volume_size or 50 %} + +scenarios: +{% for num in range(stack_num) %} +- + type: Fio + options: + filename: {{ directory }}/test + directory: {{ directory }} + bs: {{bs}} + rw: {{rw}} + size: {{size}} + rwmixwrite: {{rwmixwrite}} + numjobs: {{numjobs}} + direct: {{direct}} + ramp_time: 10 + + host: demo.storage_bottlenecks-{{num}}-{{volume_num}} + + runner: + type: Duration + duration: 60 + interval: 1 +{% endfor %} + +contexts: +{% for context_num in range(stack_num) %} +- + name: storage_bottlenecks-{{context_num}}-{{volume_num}} + image: yardstick-image + flavor: yardstick-flavor + user: ubuntu + + servers: + demo: + volume: + size: {{volume_size}} + volume_mountpoint: "/dev/vdb" + floating_ip: true + + networks: + test: + cidr: "10.0.1.0/24" + port_security_enabled: true +{% endfor %}
\ No newline at end of file diff --git a/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml index 13fe5a5a5..2e096a126 100644 --- a/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml +++ b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml @@ -15,6 +15,7 @@ --- {% set provider = provider or none %} {% set physical_networks = physical_networks or ['physnet1', 'physnet2'] %} +{% set segmentation_id = segmentation_id or none %} schema: yardstick:task:0.1 scenarios: @@ -74,6 +75,9 @@ context: {% if provider %} provider: {{ provider }} physical_network: {{ physical_networks[0] }} + {% if segmentation_id %} + segmentation_id: {{ segmentation_id }} + {% endif %} {% endif %} port_security_enabled: False enable_dhcp: 'false' @@ -83,6 +87,9 @@ context: {% if provider %} provider: {{ provider }} physical_network: {{ physical_networks[1] }} + {% if segmentation_id %} + segmentation_id: {{ segmentation_id }} + {% endif %} {% endif %} port_security_enabled: False enable_dhcp: 'false' diff --git a/tests/unit/network_services/traffic_profile/test_traffic_profile.py b/tests/unit/network_services/traffic_profile/test_traffic_profile.py index 0bb0a88a6..37b9a08d0 100644 --- a/tests/unit/network_services/traffic_profile/test_traffic_profile.py +++ b/tests/unit/network_services/traffic_profile/test_traffic_profile.py @@ -13,14 +13,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -from __future__ import absolute_import +import ipaddress -import unittest import mock +import six +import unittest from tests.unit import STL_MOCKS +from yardstick.common import exceptions as y_exc STLClient = mock.MagicMock() stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) @@ -215,11 +216,27 @@ class TestTrexProfile(unittest.TestCase): TrexProfile(TrafficProfile) self.assertEqual({}, trex_profile.generate_imix_data(False)) - def test__get_start_end_ipv6(self): - trex_profile = \ - TrexProfile(TrafficProfile) - self.assertRaises(SystemExit, trex_profile._get_start_end_ipv6, - "1.1.1.3", "1.1.1.1") + def test__count_ip_ipv4(self): + start, end, count = TrexProfile._count_ip('1.1.1.1', '1.2.3.4') + self.assertEqual('1.1.1.1', str(start)) + self.assertEqual('1.2.3.4', str(end)) + diff = (int(ipaddress.IPv4Address(six.u('1.2.3.4'))) - + int(ipaddress.IPv4Address(six.u('1.1.1.1')))) + self.assertEqual(diff, count) + + def test__count_ip_ipv6(self): + start_ip = '0064:ff9b:0:0:0:0:9810:6414' + end_ip = '0064:ff9b:0:0:0:0:9810:6420' + start, end, count = TrexProfile._count_ip(start_ip, end_ip) + self.assertEqual(0x98106414, start) + self.assertEqual(0x98106420, end) + self.assertEqual(0x98106420 - 0x98106414, count) + + def test__count_ip_ipv6_exception(self): + start_ip = '0064:ff9b:0:0:0:0:9810:6420' + end_ip = '0064:ff9b:0:0:0:0:9810:6414' + with self.assertRaises(y_exc.IPv6RangeError): + TrexProfile._count_ip(start_ip, end_ip) def test__dscp_range_action_partial_actual_count_zero(self): traffic_profile = TrexProfile(TrafficProfile) @@ -258,13 +275,17 @@ class TestTrexProfile(unittest.TestCase): def test__general_single_action_partial(self): trex_profile = TrexProfile(TrafficProfile) - trex_profile._general_single_action_partial(ETHERNET)(SRC)(self.EXAMPLE_ETHERNET_ADDR) - self.assertEqual(self.EXAMPLE_ETHERNET_ADDR, trex_profile.ether_packet.src) + trex_profile._general_single_action_partial(ETHERNET)(SRC)( + self.EXAMPLE_ETHERNET_ADDR) + 
self.assertEqual(self.EXAMPLE_ETHERNET_ADDR, + trex_profile.ether_packet.src) - trex_profile._general_single_action_partial(IP)(DST)(self.EXAMPLE_IP_ADDR) + trex_profile._general_single_action_partial(IP)(DST)( + self.EXAMPLE_IP_ADDR) self.assertEqual(self.EXAMPLE_IP_ADDR, trex_profile.ip_packet.dst) - trex_profile._general_single_action_partial(IPv6)(DST)(self.EXAMPLE_IPv6_ADDR) + trex_profile._general_single_action_partial(IPv6)(DST)( + self.EXAMPLE_IPv6_ADDR) self.assertEqual(self.EXAMPLE_IPv6_ADDR, trex_profile.ip6_packet.dst) trex_profile._general_single_action_partial(UDP)(SRC_PORT)(5060) diff --git a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py index af941c04f..25633384e 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py @@ -624,37 +624,34 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase): self.assertIsInstance(dpdk_vnf_setup_env_helper.setup_vnf_environment(), ResourceProfile) - def test__setup_dpdk_early_success(self): - vnfd_helper = VnfdHelper(self.VNFD_0) + def test__setup_dpdk(self): ssh_helper = mock.Mock() - ssh_helper.execute.return_value = 0, 'output', '' - ssh_helper.join_bin_path.return_value = 'joined_path' - ssh_helper.provision_tool.return_value = 'provision string' - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - dpdk_setup_helper._setup_hugepages = mock.Mock() - - self.assertIsNone(dpdk_setup_helper._setup_dpdk()) - self.assertEqual(dpdk_setup_helper.ssh_helper.execute.call_count, 2) - - @mock.patch('yardstick.ssh.SSH') - def test__setup_dpdk_short(self, _): - def execute_side(cmd): - if 'joined_path' in cmd: - return 0, 'output', '' - return 1, 'bad output', 'error output' + ssh_helper.execute = mock.Mock() + ssh_helper.execute.return_value = (0, 0, 0) + dpdk_setup_helper = 
DpdkVnfSetupEnvHelper(mock.ANY, ssh_helper, mock.ANY) + with mock.patch.object(dpdk_setup_helper, '_setup_hugepages') as \ + mock_setup_hp: + dpdk_setup_helper._setup_dpdk() + mock_setup_hp.assert_called_once() + ssh_helper.execute.assert_has_calls([ + mock.call('sudo modprobe uio && sudo modprobe igb_uio'), + mock.call('lsmod | grep -i igb_uio') + ]) - vnfd_helper = VnfdHelper(self.VNFD_0) + def test__setup_dpdk_igb_uio_not_loaded(self): ssh_helper = mock.Mock() - ssh_helper.execute.side_effect = execute_side - ssh_helper.join_bin_path.return_value = 'joined_path' - ssh_helper.provision_tool.return_value = 'provision string' - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - dpdk_setup_helper._setup_hugepages = mock.Mock() - - self.assertIsNone(dpdk_setup_helper._setup_dpdk()) - self.assertEqual(dpdk_setup_helper.ssh_helper.execute.call_count, 3) + ssh_helper.execute = mock.Mock() + ssh_helper.execute.side_effect = [(0, 0, 0), (1, 0, 0)] + dpdk_setup_helper = DpdkVnfSetupEnvHelper(mock.ANY, ssh_helper, mock.ANY) + with mock.patch.object(dpdk_setup_helper, '_setup_hugepages') as \ + mock_setup_hp: + with self.assertRaises(y_exceptions.DPDKSetupDriverError): + dpdk_setup_helper._setup_dpdk() + mock_setup_hp.assert_called_once() + ssh_helper.execute.assert_has_calls([ + mock.call('sudo modprobe uio && sudo modprobe igb_uio'), + mock.call('lsmod | grep -i igb_uio') + ]) @mock.patch('yardstick.ssh.SSH') def test__setup_resources(self, _): diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py index 4ba543b9e..7b7f1be32 100644 --- a/yardstick/benchmark/contexts/heat.py +++ b/yardstick/benchmark/contexts/heat.py @@ -95,13 +95,15 @@ class HeatContext(Context): return sorted_networks def init(self, attrs): - self.check_environment() """initializes itself from the supplied arguments""" + self.check_environment() self.name = attrs["name"] self._user = attrs.get("user") 
self.template_file = attrs.get("heat_template") + + self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT) if self.template_file: self.heat_parameters = attrs.get("heat_parameters") return @@ -113,8 +115,6 @@ class HeatContext(Context): self._flavor = attrs.get("flavor") - self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT) - self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"]) for name, pg_attrs in attrs.get( "placement_groups", {}).items()] diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py index 9b1b3f851..f5d2b18ac 100644 --- a/yardstick/benchmark/core/task.py +++ b/yardstick/benchmark/core/task.py @@ -57,7 +57,7 @@ class Task(object): # pragma: no cover out_types = [s.strip() for s in dispatchers.split(',')] output_config['DEFAULT']['dispatcher'] = out_types - def start(self, args, **kwargs): + def start(self, args): """Start a benchmark scenario.""" atexit.register(self.atexit_handler) @@ -69,7 +69,7 @@ class Task(object): # pragma: no cover try: output_config = utils.parse_ini_file(CONF_FILE) - except Exception: + except Exception: # pylint: disable=broad-except # all error will be ignore, the default value is {} output_config = {} @@ -120,10 +120,10 @@ class Task(object): # pragma: no cover case_name = os.path.splitext(os.path.basename(task_files[i]))[0] try: - data = self._run(scenarios, run_in_parallel, args.output_file) + data = self._run(scenarios, run_in_parallel, output_config) except KeyboardInterrupt: raise - except Exception: + except Exception: # pylint: disable=broad-except LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True) testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []} else: @@ -232,11 +232,12 @@ class Task(object): # pragma: no cover def _do_output(self, output_config, result): dispatchers = DispatcherBase.get(output_config) + dispatchers = (d for d in dispatchers if d.__dispatcher_type__ != 'Influxdb') for dispatcher in dispatchers: 
dispatcher.flush_result_data(result) - def _run(self, scenarios, run_in_parallel, output_file): + def _run(self, scenarios, run_in_parallel, output_config): """Deploys context and calls runners""" for context in self.contexts: context.deploy() @@ -247,14 +248,14 @@ class Task(object): # pragma: no cover # Start all background scenarios for scenario in filter(_is_background_scenario, scenarios): scenario["runner"] = dict(type="Duration", duration=1000000000) - runner = self.run_one_scenario(scenario, output_file) + runner = self.run_one_scenario(scenario, output_config) background_runners.append(runner) runners = [] if run_in_parallel: for scenario in scenarios: if not _is_background_scenario(scenario): - runner = self.run_one_scenario(scenario, output_file) + runner = self.run_one_scenario(scenario, output_config) runners.append(runner) # Wait for runners to finish @@ -263,12 +264,12 @@ class Task(object): # pragma: no cover if status != 0: raise RuntimeError( "{0} runner status {1}".format(runner.__execution_type__, status)) - LOG.info("Runner ended, output in %s", output_file) + LOG.info("Runner ended") else: # run serially for scenario in scenarios: if not _is_background_scenario(scenario): - runner = self.run_one_scenario(scenario, output_file) + runner = self.run_one_scenario(scenario, output_config) status = runner_join(runner, background_runners, self.outputs, result) if status != 0: LOG.error('Scenario NO.%s: "%s" ERROR!', @@ -276,7 +277,7 @@ class Task(object): # pragma: no cover scenario.get('type')) raise RuntimeError( "{0} runner status {1}".format(runner.__execution_type__, status)) - LOG.info("Runner ended, output in %s", output_file) + LOG.info("Runner ended") # Abort background runners for runner in background_runners: @@ -313,10 +314,10 @@ class Task(object): # pragma: no cover else: return op - def run_one_scenario(self, scenario_cfg, output_file): + def run_one_scenario(self, scenario_cfg, output_config): """run one scenario using context""" 
runner_cfg = scenario_cfg["runner"] - runner_cfg['output_filename'] = output_file + runner_cfg['output_config'] = output_config options = scenario_cfg.get('options', {}) scenario_cfg['options'] = self._parse_options(options) diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py index a887fa5b3..99386a440 100755 --- a/yardstick/benchmark/runners/base.py +++ b/yardstick/benchmark/runners/base.py @@ -23,6 +23,7 @@ import multiprocessing import subprocess import time import traceback +from subprocess import CalledProcessError import importlib @@ -30,6 +31,7 @@ from six.moves.queue import Empty import yardstick.common.utils as utils from yardstick.benchmark.scenarios import base as base_scenario +from yardstick.dispatcher.base import Base as DispatcherBase log = logging.getLogger(__name__) @@ -39,7 +41,7 @@ def _execute_shell_command(command): exitcode = 0 try: output = subprocess.check_output(command, shell=True) - except Exception: + except CalledProcessError: exitcode = -1 output = traceback.format_exc() log.error("exec command '%s' error:\n ", command) @@ -137,6 +139,8 @@ class Runner(object): Runner.release(runner) def __init__(self, config): + self.task_id = None + self.case_name = None self.config = config self.periodic_action_process = None self.output_queue = multiprocessing.Queue() @@ -170,6 +174,8 @@ class Runner(object): cls = getattr(module, path_split[-1]) self.config['object'] = class_name + self.case_name = scenario_cfg['tc'] + self.task_id = scenario_cfg['task_id'] self.aborted.clear() # run a potentially configured pre-start action @@ -245,10 +251,24 @@ class Runner(object): def get_result(self): result = [] + + dispatcher = self.config['output_config']['DEFAULT']['dispatcher'] + output_in_influxdb = 'influxdb' in dispatcher + while not self.result_queue.empty(): log.debug("result_queue size %s", self.result_queue.qsize()) try: - result.append(self.result_queue.get(True, 1)) + one_record = self.result_queue.get(True, 1) 
except Empty: pass + else: + if output_in_influxdb: + self._output_to_influxdb(one_record) + + result.append(one_record) return result + + def _output_to_influxdb(self, record): + dispatchers = DispatcherBase.get(self.config['output_config']) + dispatcher = next((d for d in dispatchers if d.__dispatcher_type__ == 'Influxdb')) + dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id) diff --git a/yardstick/benchmark/scenarios/lib/delete_network.py b/yardstick/benchmark/scenarios/lib/delete_network.py index e8796bf82..2e8b595f9 100644 --- a/yardstick/benchmark/scenarios/lib/delete_network.py +++ b/yardstick/benchmark/scenarios/lib/delete_network.py @@ -7,14 +7,12 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from __future__ import print_function -from __future__ import absolute_import - import logging from yardstick.benchmark.scenarios import base import yardstick.common.openstack_utils as op_utils + LOG = logging.getLogger(__name__) @@ -30,7 +28,7 @@ class DeleteNetwork(base.Scenario): self.network_id = self.options.get("network_id", None) - self.neutron_client = op_utils.get_neutron_client() + self.shade_client = op_utils.get_shade_client() self.setup_done = False @@ -45,7 +43,7 @@ class DeleteNetwork(base.Scenario): if not self.setup_done: self.setup() - status = op_utils.delete_neutron_net(self.neutron_client, + status = op_utils.delete_neutron_net(self.shade_client, network_id=self.network_id) if status: result.update({"delete_network": 1}) @@ -53,3 +51,4 @@ class DeleteNetwork(base.Scenario): else: result.update({"delete_network": 0}) LOG.error("Delete network failed!") + return status diff --git a/yardstick/common/ansible_common.py b/yardstick/common/ansible_common.py index 9a4426bf9..be262c215 100644 --- a/yardstick/common/ansible_common.py +++ b/yardstick/common/ansible_common.py @@ -33,7 +33,7 @@ from six import StringIO from chainmap import ChainMap 
from yardstick.common.utils import Timer - +from yardstick.common import constants as consts cgitb.enable(format="text") @@ -435,6 +435,7 @@ class AnsibleCommon(object): ansible_dict = dict(os.environ, **{ "ANSIBLE_LOG_PATH": os.path.join(directory, log_file), "ANSIBLE_LOG_BASE": directory, + "ANSIBLE_ROLES_PATH": consts.ANSIBLE_ROLES_PATH, # # required for SSH to work # "ANSIBLE_SSH_ARGS": "-o UserKnownHostsFile=/dev/null " # "-o GSSAPIAuthentication=no " @@ -516,7 +517,7 @@ class AnsibleCommon(object): # playbook dir: use include to point to files in consts.ANSIBLE_DIR if not os.path.isdir(directory): - raise OSError("Not a directory, %s", directory) + raise OSError("Not a directory, %s" % directory) timeout = self.get_timeout(timeout, self.default_timeout) self.counter += 1 diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py index 646a1f2ca..43c2c19cb 100644 --- a/yardstick/common/constants.py +++ b/yardstick/common/constants.py @@ -83,6 +83,7 @@ YARDSTICK_ROOT_PATH = dirname( TASK_LOG_DIR = get_param('dir.tasklog', '/var/log/yardstick/') CONF_SAMPLE_DIR = join(REPOS_DIR, 'etc/yardstick/') ANSIBLE_DIR = join(REPOS_DIR, 'ansible') +ANSIBLE_ROLES_PATH = join(REPOS_DIR, 'ansible/roles/') SAMPLE_CASE_DIR = join(REPOS_DIR, 'samples') TESTCASE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_cases/') TESTSUITE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_suites/') diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py index e38dd246c..3e0635e46 100644 --- a/yardstick/common/exceptions.py +++ b/yardstick/common/exceptions.py @@ -63,3 +63,10 @@ class HeatTemplateError(YardstickException): """Error in Heat during the stack deployment""" message = ('Error in Heat during the creation of the OpenStack stack ' '"%(stack_name)"') + + +class IPv6RangeError(YardstickException): + message = 'Start IP "%(start_ip)s" is greater than end IP "%(end_ip)s"' + +class DPDKSetupDriverError(YardstickException): + message = '"igb_uio" 
driver is not loaded' diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py index c5b17c270..8f666e268 100644 --- a/yardstick/common/openstack_utils.py +++ b/yardstick/common/openstack_utils.py @@ -15,6 +15,7 @@ import logging from keystoneauth1 import loading from keystoneauth1 import session import shade +from shade import exc from cinderclient import client as cinderclient from novaclient import client as novaclient @@ -174,6 +175,7 @@ def get_glance_client(): # pragma: no cover def get_shade_client(): return shade.openstack_cloud() + # ********************************************* # NOVA # ********************************************* @@ -272,7 +274,8 @@ def create_aggregate_with_host(nova_client, aggregate_name, av_zone, def create_keypair(name, key_path=None): # pragma: no cover try: with open(key_path) as fpubkey: - keypair = get_nova_client().keypairs.create(name=name, public_key=fpubkey.read()) + keypair = get_nova_client().keypairs.create( + name=name, public_key=fpubkey.read()) return keypair except Exception: # pylint: disable=broad-except log.exception("Error [create_keypair(nova_client)]") @@ -304,9 +307,11 @@ def create_instance_and_wait_for_active(json_body): # pragma: no cover return None -def attach_server_volume(server_id, volume_id, device=None): # pragma: no cover +def attach_server_volume(server_id, volume_id, + device=None): # pragma: no cover try: - get_nova_client().volumes.create_server_volume(server_id, volume_id, device) + get_nova_client().volumes.create_server_volume(server_id, + volume_id, device) except Exception: # pylint: disable=broad-except log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]", server_id, volume_id) @@ -370,7 +375,8 @@ def get_server_by_name(name): # pragma: no cover def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover try: - return get_nova_client().flavors.create(name, ram, vcpus, disk, **kwargs) + return get_nova_client().flavors.create(name, 
ram, vcpus, + disk, **kwargs) except Exception: # pylint: disable=broad-except log.exception("Error [create_flavor(nova_client, %s, %s, %s, %s, %s)]", name, ram, disk, vcpus, kwargs['is_public']) @@ -455,13 +461,11 @@ def create_neutron_net(neutron_client, json_body): # pragma: no cover raise Exception("operation error") -def delete_neutron_net(neutron_client, network_id): # pragma: no cover +def delete_neutron_net(shade_client, network_id): try: - neutron_client.delete_network(network_id) - return True - except Exception: # pylint: disable=broad-except - log.error("Error [delete_neutron_net(neutron_client, '%s')]", - network_id) + return shade_client.delete_network(network_id) + except exc.OpenStackCloudException: + log.error("Error [delete_neutron_net(shade_client, '%s')]", network_id) return False @@ -558,7 +562,8 @@ def get_security_group_id(neutron_client, sg_name): # pragma: no cover return id -def create_security_group(neutron_client, sg_name, sg_description): # pragma: no cover +def create_security_group(neutron_client, sg_name, + sg_description): # pragma: no cover json_body = {'security_group': {'name': sg_name, 'description': sg_description}} try: @@ -611,8 +616,8 @@ def create_secgroup_rule(neutron_client, sg_id, direction, protocol, return False -def create_security_group_full(neutron_client, - sg_name, sg_description): # pragma: no cover +def create_security_group_full(neutron_client, sg_name, + sg_description): # pragma: no cover sg_id = get_security_group_id(neutron_client, sg_name) if sg_id != '': log.info("Using existing security group '%s'...", sg_name) @@ -670,22 +675,18 @@ def create_image(glance_client, image_name, file_path, disk_format, else: log.info("Creating image '%s' from '%s'...", image_name, file_path) - image = glance_client.images.create(name=image_name, - visibility=public, - disk_format=disk_format, - container_format=container_format, - min_disk=min_disk, - min_ram=min_ram, - tags=tag, - protected=protected, - **kwargs) + image = 
glance_client.images.create( + name=image_name, visibility=public, disk_format=disk_format, + container_format=container_format, min_disk=min_disk, + min_ram=min_ram, tags=tag, protected=protected, **kwargs) image_id = image.id with open(file_path) as image_data: glance_client.images.upload(image_id, image_data) return image_id except Exception: # pylint: disable=broad-except - log.error("Error [create_glance_image(glance_client, '%s', '%s', '%s')]", - image_name, file_path, public) + log.error( + "Error [create_glance_image(glance_client, '%s', '%s', '%s')]", + image_name, file_path, public) return None @@ -725,7 +726,8 @@ def create_volume(cinder_client, volume_name, volume_size, return None -def delete_volume(cinder_client, volume_id, forced=False): # pragma: no cover +def delete_volume(cinder_client, volume_id, + forced=False): # pragma: no cover try: if forced: try: diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py index 632b433b5..e8c7cf57b 100644 --- a/yardstick/dispatcher/influxdb.py +++ b/yardstick/dispatcher/influxdb.py @@ -11,8 +11,10 @@ from __future__ import absolute_import import logging import time +import os import requests +from requests import ConnectionError from yardstick.common import utils from third_party.influxdb.influxdb_line_protocol import make_lines @@ -38,7 +40,8 @@ class InfluxdbDispatcher(DispatchBase): self.influxdb_url = "%s/write?db=%s" % (self.target, self.db_name) - self.task_id = -1 + self.task_id = None + self.tags = None def flush_result_data(self, data): LOG.debug('Test result all : %s', data) @@ -57,28 +60,41 @@ class InfluxdbDispatcher(DispatchBase): for record in data['tc_data']: # skip results with no data because we influxdb encode empty dicts if record.get("data"): - self._upload_one_record(record, case, tc_criteria) + self.upload_one_record(record, case, tc_criteria) return 0 - def _upload_one_record(self, data, case, tc_criteria): + def upload_one_record(self, data, case, tc_criteria, 
task_id=None): + if task_id: + self.task_id = task_id + + line = self._data_to_line_protocol(data, case, tc_criteria) + LOG.debug('Test result line format : %s', line) + try: - line = self._data_to_line_protocol(data, case, tc_criteria) - LOG.debug('Test result line format : %s', line) res = requests.post(self.influxdb_url, data=line, auth=(self.username, self.password), timeout=self.timeout) + except ConnectionError as err: + LOG.exception('Failed to record result data: %s', err) + else: if res.status_code != 204: LOG.error('Test result posting finished with status code' ' %d.', res.status_code) LOG.error(res.text) - except Exception as err: - LOG.exception('Failed to record result data: %s', err) - def _data_to_line_protocol(self, data, case, criteria): msg = {} + + if not self.tags: + self.tags = { + 'deploy_scenario': os.environ.get('DEPLOY_SCENARIO', 'unknown'), + 'installer': os.environ.get('INSTALLER_TYPE', 'unknown'), + 'pod_name': os.environ.get('NODE_NAME', 'unknown'), + 'version': os.environ.get('YARDSTICK_BRANCH', 'unknown') + } + point = { "measurement": case, "fields": utils.flatten_dict_key(data["data"]), @@ -93,7 +109,7 @@ class InfluxdbDispatcher(DispatchBase): def _get_nano_timestamp(self, results): try: timestamp = results["timestamp"] - except Exception: + except KeyError: timestamp = time.time() return str(int(float(timestamp) * 1000000000)) diff --git a/yardstick/network_services/traffic_profile/traffic_profile.py b/yardstick/network_services/traffic_profile/traffic_profile.py index 3b19ff9be..8cde5e4a7 100644 --- a/yardstick/network_services/traffic_profile/traffic_profile.py +++ b/yardstick/network_services/traffic_profile/traffic_profile.py @@ -11,16 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-""" Trex Traffic Profile definitions """ -from __future__ import absolute_import import struct import socket import logging from random import SystemRandom -import six import ipaddress +import six + +from yardstick.common import exceptions as y_exc from yardstick.network_services.traffic_profile.base import TrafficProfile from trex_stl_lib.trex_stl_client import STLStream from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats @@ -78,31 +78,32 @@ class TrexProfile(TrafficProfile): op='inc', step=1) self.vm_flow_vars.append(stl_vm_flow_var) - stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='mac_{}'.format(direction), - pkt_offset='Ether.{}'.format(direction)) + stl_vm_wr_flow_var = STLVmWrFlowVar( + fv_name='mac_{}'.format(direction), + pkt_offset='Ether.{}'.format(direction)) self.vm_flow_vars.append(stl_vm_wr_flow_var) return partial def _ip_range_action_partial(self, direction, count=1): # pylint: disable=unused-argument def partial(min_value, max_value, count): - ip1 = int(ipaddress.IPv4Address(min_value)) - ip2 = int(ipaddress.IPv4Address(max_value)) - actual_count = (ip2 - ip1) + _, _, actual_count = self._count_ip(min_value, max_value) if not actual_count: count = 1 elif actual_count < int(count): count = actual_count - stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="ip4_{}".format(direction), - min_value=min_value, - max_value=max_value, - size=4, - limit=int(count), - seed=0x1235) + stl_vm_flow_var = STLVmFlowVarRepeatableRandom( + name="ip4_{}".format(direction), + min_value=min_value, + max_value=max_value, + size=4, + limit=int(count), + seed=0x1235) self.vm_flow_vars.append(stl_vm_flow_var) - stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip4_{}'.format(direction), - pkt_offset='IP.{}'.format(direction)) + stl_vm_wr_flow_var = STLVmWrFlowVar( + fv_name='ip4_{}'.format(direction), + pkt_offset='IP.{}'.format(direction)) self.vm_flow_vars.append(stl_vm_wr_flow_var) stl_vm_fix_ipv4 = STLVmFixIpv4(offset="IP") self.vm_flow_vars.append(stl_vm_fix_ipv4) 
@@ -111,7 +112,7 @@ class TrexProfile(TrafficProfile): def _ip6_range_action_partial(self, direction, _): def partial(min_value, max_value, count): # pylint: disable=unused-argument - min_value, max_value = self._get_start_end_ipv6(min_value, max_value) + min_value, max_value, _ = self._count_ip(min_value, max_value) stl_vm_flow_var = STLVmFlowVar(name="ip6_{}".format(direction), min_value=min_value, max_value=max_value, @@ -119,9 +120,10 @@ class TrexProfile(TrafficProfile): op='random', step=1) self.vm_flow_vars.append(stl_vm_flow_var) - stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip6_{}'.format(direction), - pkt_offset='IPv6.{}'.format(direction), - offset_fixup=8) + stl_vm_wr_flow_var = STLVmWrFlowVar( + fv_name='ip6_{}'.format(direction), + pkt_offset='IPv6.{}'.format(direction), + offset_fixup=8) self.vm_flow_vars.append(stl_vm_wr_flow_var) return partial @@ -149,15 +151,17 @@ class TrexProfile(TrafficProfile): elif int(count) > actual_count: count = actual_count - stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="port_{}".format(field), - min_value=min_value, - max_value=max_value, - size=2, - limit=int(count), - seed=0x1235) + stl_vm_flow_var = STLVmFlowVarRepeatableRandom( + name="port_{}".format(field), + min_value=min_value, + max_value=max_value, + size=2, + limit=int(count), + seed=0x1235) self.vm_flow_vars.append(stl_vm_flow_var) - stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_{}'.format(field), - pkt_offset=self.udp[field]) + stl_vm_wr_flow_var = STLVmWrFlowVar( + fv_name='port_{}'.format(field), + pkt_offset=self.udp[field]) self.vm_flow_vars.append(stl_vm_wr_flow_var) return partial @@ -448,20 +452,18 @@ class TrexProfile(TrafficProfile): self.profile = STLProfile(self.streams) @classmethod - def _get_start_end_ipv6(cls, start_ip, end_ip): - try: - ip1 = socket.inet_pton(socket.AF_INET6, start_ip) - ip2 = socket.inet_pton(socket.AF_INET6, end_ip) - hi1, lo1 = struct.unpack('!QQ', ip1) - hi2, lo2 = struct.unpack('!QQ', ip2) - if ((hi1 << 64) 
| lo1) > ((hi2 << 64) | lo2): - raise SystemExit("IPv6: start_ip is greater then end_ip") - max_p1 = abs(int(lo1) - int(lo2)) - base_p1 = lo1 - except Exception as ex_error: - raise SystemExit(ex_error) - else: - return base_p1, max_p1 + base_p1 + def _count_ip(cls, start_ip, end_ip): + start = ipaddress.ip_address(six.u(start_ip)) + end = ipaddress.ip_address(six.u(end_ip)) + if start.version == 4: + return start, end, int(end) - int(start) + elif start.version == 6: + if int(start) > int(end): + raise y_exc.IPv6RangeError(start_ip=str(start), + end_ip=str(end)) + _, lo1 = struct.unpack('!QQ', start.packed) + _, lo2 = struct.unpack('!QQ', end.packed) + return lo1, lo2, lo2 - lo1 @classmethod def _get_random_value(cls, min_port, max_port): diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py index fbaaa0ca8..d57d7e601 100644 --- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py @@ -250,20 +250,12 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): self.ssh_helper.execute("sudo killall %s" % self.APP_NAME) def _setup_dpdk(self): - """ setup dpdk environment needed for vnf to run """ - + """Setup DPDK environment needed for VNF to run""" self._setup_hugepages() - self.ssh_helper.execute("sudo modprobe uio && sudo modprobe igb_uio") - - exit_status = self.ssh_helper.execute("lsmod | grep -i igb_uio")[0] - if exit_status == 0: - return - - dpdk = self.ssh_helper.join_bin_path(DPDK_VERSION) - dpdk_setup = self.ssh_helper.provision_tool(tool_file="nsb_setup.sh") - exit_status = self.ssh_helper.execute("which {} >/dev/null 2>&1".format(dpdk))[0] - if exit_status != 0: - self.ssh_helper.execute("bash %s dpdk >/dev/null 2>&1" % dpdk_setup) + self.ssh_helper.execute('sudo modprobe uio && sudo modprobe igb_uio') + exit_status = self.ssh_helper.execute('lsmod | grep -i igb_uio')[0] + if exit_status: + raise 
y_exceptions.DPDKSetupDriverError() def get_collectd_options(self): options = self.scenario_helper.all_options.get("collectd", {}) diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py index 754482e4f..3c3d28146 100644 --- a/yardstick/orchestrator/heat.py +++ b/yardstick/orchestrator/heat.py @@ -74,7 +74,14 @@ class HeatStack(object): if self.uuid is None: return - ret = self._cloud.delete_stack(self.uuid, wait=wait) + try: + ret = self._cloud.delete_stack(self.uuid, wait=wait) + except TypeError: + # NOTE(ralonsoh): this exception catch solves a bug in Shade, which + # tries to retrieve and read the stack status when it's already + # deleted. + ret = True + _DEPLOYED_STACKS.pop(self.uuid) self._stack = None return ret @@ -473,7 +480,36 @@ name (i.e. %s). 'port_range_max': '65535'}, {'remote_ip_prefix': '::/0', 'ethertype': 'IPv6', - 'protocol': 'ipv6-icmp'} + 'protocol': 'ipv6-icmp'}, + {'remote_ip_prefix': '0.0.0.0/0', + 'direction': 'egress', + 'protocol': 'tcp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '0.0.0.0/0', + 'direction': 'egress', + 'protocol': 'udp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '0.0.0.0/0', + 'direction': 'egress', + 'protocol': 'icmp'}, + {'remote_ip_prefix': '::/0', + 'direction': 'egress', + 'ethertype': 'IPv6', + 'protocol': 'tcp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '::/0', + 'direction': 'egress', + 'ethertype': 'IPv6', + 'protocol': 'udp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '::/0', + 'direction': 'egress', + 'ethertype': 'IPv6', + 'protocol': 'ipv6-icmp'}, ] } } diff --git a/yardstick/tests/fixture.py b/yardstick/tests/fixture.py new file mode 100644 index 000000000..94d20eb34 --- /dev/null +++ b/yardstick/tests/fixture.py @@ -0,0 +1,47 @@ +# Copyright 2017 Intel Corporation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock +import six + +from yardstick.common import task_template + + +class PluginParserFixture(fixtures.Fixture): + """PluginParser fixture. + + This class is intended to be used as a fixture within unit tests and + therefore consumers must register it using useFixture() within their + unit test class. + """ + + def __init__(self, rendered_plugin): + super(PluginParserFixture, self).__init__() + self._rendered_plugin = rendered_plugin + + def _setUp(self): + self.addCleanup(self._restore) + self._mock_tasktemplate_render = mock.patch.object( + task_template.TaskTemplate, 'render') + self.mock_tasktemplate_render = self._mock_tasktemplate_render.start() + self.mock_tasktemplate_render.return_value = self._rendered_plugin + self._mock_open = mock.patch.object(six.moves.builtins, 'open', create=True) + self.mock_open = self._mock_open.start() + self.mock_open.side_effect = mock.mock_open() + + def _restore(self): + self._mock_tasktemplate_render.stop() + self._mock_open.stop() diff --git a/yardstick/tests/unit/benchmark/core/test_plugin.py b/yardstick/tests/unit/benchmark/core/test_plugin.py index 1d6e80574..0d14e4e86 100644 --- a/yardstick/tests/unit/benchmark/core/test_plugin.py +++ b/yardstick/tests/unit/benchmark/core/test_plugin.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - ############################################################################## # Copyright (c) 2016 Huawei Technologies 
Co.,Ltd and others. # @@ -9,94 +7,136 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -# Unittest for yardstick.benchmark.core.plugin -from __future__ import absolute_import +import copy import os -from os.path import dirname as dirname +import pkg_resources -try: - from unittest import mock -except ImportError: - import mock -import unittest +import mock +import testtools +from yardstick import ssh from yardstick.benchmark.core import plugin +from yardstick.tests import fixture + +class PluginTestCase(testtools.TestCase): -class Arg(object): + FILE = """ +schema: "yardstick:plugin:0.1" - def __init__(self): - # self.input_file = ('plugin/sample_config.yaml',) - self.input_file = [ - os.path.join(os.path.abspath( - dirname(dirname(dirname(dirname(dirname(dirname(__file__))))))), - 'plugin/sample_config.yaml')] +plugins: + name: sample +deployment: + ip: 10.1.0.50 + user: root + password: root +""" -@mock.patch('yardstick.benchmark.core.plugin.ssh') -class pluginTestCase(unittest.TestCase): + NAME = 'sample' + DEPLOYMENT = {'ip': '10.1.0.50', 'user': 'root', 'password': 'root'} def setUp(self): - self.result = {} - - def test_install(self, mock_ssh): - p = plugin.Plugin() - mock_ssh.SSH.from_node().execute.return_value = (0, '', '') - input_file = Arg() - p.install(input_file) - expected_result = {} - self.assertEqual(self.result, expected_result) - - def test_remove(self, mock_ssh): - p = plugin.Plugin() - mock_ssh.SSH.from_node().execute.return_value = (0, '', '') - input_file = Arg() - p.remove(input_file) - expected_result = {} - self.assertEqual(self.result, expected_result) - - def test_install_setup_run(self, mock_ssh): - p = plugin.Plugin() - mock_ssh.SSH.from_node().execute.return_value = (0, '', '') - plugins = { - "name": "sample" - } - deployment = { - "ip": "10.1.0.50", - "user": "root", - "password": "root" - } - plugin_name = plugins.get("name") - 
p._install_setup(plugin_name, deployment) - self.assertIsNotNone(p.client) - - p._run(plugin_name) - expected_result = {} - self.assertEqual(self.result, expected_result) - - def test_remove_setup_run(self, mock_ssh): - p = plugin.Plugin() - mock_ssh.SSH.from_node().execute.return_value = (0, '', '') - plugins = { - "name": "sample" - } - deployment = { - "ip": "10.1.0.50", - "user": "root", - "password": "root" - } - plugin_name = plugins.get("name") - p._remove_setup(plugin_name, deployment) - self.assertIsNotNone(p.client) - - p._run(plugin_name) - expected_result = {} - self.assertEqual(self.result, expected_result) - - -def main(): - unittest.main() - - -if __name__ == '__main__': - main() + super(PluginTestCase, self).setUp() + self.plugin_parser = plugin.PluginParser(mock.Mock()) + self.plugin = plugin.Plugin() + self.useFixture(fixture.PluginParserFixture(PluginTestCase.FILE)) + + self._mock_ssh_from_node = mock.patch.object(ssh.SSH, 'from_node') + self.mock_ssh_from_node = self._mock_ssh_from_node.start() + self.mock_ssh_obj = mock.Mock() + self.mock_ssh_from_node.return_value = self.mock_ssh_obj + self.mock_ssh_obj.wait = mock.Mock() + self.mock_ssh_obj._put_file_shell = mock.Mock() + + self.addCleanup(self._cleanup) + + def _cleanup(self): + self._mock_ssh_from_node.stop() + + def test_install(self): + args = mock.Mock() + args.input_file = [mock.Mock()] + with mock.patch.object(self.plugin, '_install_setup') as \ + mock_install, \ + mock.patch.object(self.plugin, '_run') as mock_run: + self.plugin.install(args) + mock_install.assert_called_once_with(PluginTestCase.NAME, + PluginTestCase.DEPLOYMENT) + mock_run.assert_called_once_with(PluginTestCase.NAME) + + def test_remove(self): + args = mock.Mock() + args.input_file = [mock.Mock()] + with mock.patch.object(self.plugin, '_remove_setup') as \ + mock_remove, \ + mock.patch.object(self.plugin, '_run') as mock_run: + self.plugin.remove(args) + mock_remove.assert_called_once_with(PluginTestCase.NAME, + 
PluginTestCase.DEPLOYMENT) + mock_run.assert_called_once_with(PluginTestCase.NAME) + + @mock.patch.object(pkg_resources, 'resource_filename', + return_value='script') + def test__install_setup(self, mock_resource_filename): + plugin_name = 'plugin_name' + self.plugin._install_setup(plugin_name, PluginTestCase.DEPLOYMENT) + mock_resource_filename.assert_called_once_with( + 'yardstick.resources', 'scripts/install/' + plugin_name + '.bash') + self.mock_ssh_from_node.assert_called_once_with( + PluginTestCase.DEPLOYMENT) + self.mock_ssh_obj.wait.assert_called_once_with(timeout=600) + self.mock_ssh_obj._put_file_shell.assert_called_once_with( + 'script', '~/{0}.sh'.format(plugin_name)) + + @mock.patch.object(pkg_resources, 'resource_filename', + return_value='script') + @mock.patch.object(os, 'environ', return_value='1.2.3.4') + def test__install_setup_with_ip_local(self, mock_os_environ, + mock_resource_filename): + plugin_name = 'plugin_name' + deployment = copy.deepcopy(PluginTestCase.DEPLOYMENT) + deployment['ip'] = 'local' + self.plugin._install_setup(plugin_name, deployment) + mock_os_environ.__getitem__.assert_called_once_with('JUMP_HOST_IP') + mock_resource_filename.assert_called_once_with( + 'yardstick.resources', + 'scripts/install/' + plugin_name + '.bash') + self.mock_ssh_from_node.assert_called_once_with( + deployment, overrides={'ip': os.environ["JUMP_HOST_IP"]}) + self.mock_ssh_obj.wait.assert_called_once_with(timeout=600) + self.mock_ssh_obj._put_file_shell.assert_called_once_with( + 'script', '~/{0}.sh'.format(plugin_name)) + + @mock.patch.object(pkg_resources, 'resource_filename', + return_value='script') + def test__remove_setup(self, mock_resource_filename): + plugin_name = 'plugin_name' + self.plugin._remove_setup(plugin_name, PluginTestCase.DEPLOYMENT) + mock_resource_filename.assert_called_once_with( + 'yardstick.resources', + 'scripts/remove/' + plugin_name + '.bash') + self.mock_ssh_from_node.assert_called_once_with( + PluginTestCase.DEPLOYMENT) 
+ self.mock_ssh_obj.wait.assert_called_once_with(timeout=600) + self.mock_ssh_obj._put_file_shell.assert_called_once_with( + 'script', '~/{0}.sh'.format(plugin_name)) + + @mock.patch.object(pkg_resources, 'resource_filename', + return_value='script') + @mock.patch.object(os, 'environ', return_value='1.2.3.4') + def test__remove_setup_with_ip_local(self, mock_os_environ, + mock_resource_filename): + plugin_name = 'plugin_name' + deployment = copy.deepcopy(PluginTestCase.DEPLOYMENT) + deployment['ip'] = 'local' + self.plugin._remove_setup(plugin_name, deployment) + mock_os_environ.__getitem__.assert_called_once_with('JUMP_HOST_IP') + mock_resource_filename.assert_called_once_with( + 'yardstick.resources', + 'scripts/remove/' + plugin_name + '.bash') + self.mock_ssh_from_node.assert_called_once_with( + deployment, overrides={'ip': os.environ["JUMP_HOST_IP"]}) + self.mock_ssh_obj.wait.assert_called_once_with(timeout=600) + self.mock_ssh_obj._put_file_shell.assert_called_once_with( + 'script', '~/{0}.sh'.format(plugin_name)) diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py index 3d9a10d88..ee00d8826 100644 --- a/yardstick/tests/unit/benchmark/core/test_task.py +++ b/yardstick/tests/unit/benchmark/core/test_task.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - ############################################################################## # Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
# @@ -9,43 +7,32 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -# Unittest for yardstick.benchmark.core.task - -from __future__ import print_function - -from __future__ import absolute_import import os -import unittest - -try: - from unittest import mock -except ImportError: - import mock +import mock +import unittest from yardstick.benchmark.core import task from yardstick.common import constants as consts -# pylint: disable=unused-argument -# disable this for now because I keep forgetting mock patch arg ordering - - class TaskTestCase(unittest.TestCase): - @mock.patch('yardstick.benchmark.core.task.Context') - def test_parse_nodes_host_target_same_context(self, mock_context): - nodes = { - "host": "node1.LF", - "target": "node2.LF" + @mock.patch.object(task, 'Context') + def test_parse_nodes_with_context_same_context(self, mock_context): + scenario_cfg = { + "nodes": { + "host": "node1.LF", + "target": "node2.LF" + } } - scenario_cfg = {"nodes": nodes} server_info = { "ip": "10.20.0.3", "user": "root", "key_filename": "/root/.ssh/id_rsa" } mock_context.get_server.return_value = server_info + context_cfg = task.parse_nodes_with_context(scenario_cfg) self.assertEqual(context_cfg["host"], server_info) @@ -57,15 +44,22 @@ class TaskTestCase(unittest.TestCase): t._set_dispatchers(output_config) self.assertEqual(output_config, output_config) - @mock.patch('yardstick.benchmark.core.task.DispatcherBase') + @mock.patch.object(task, 'DispatcherBase') def test__do_output(self, mock_dispatcher): t = task.Task() output_config = {"DEFAULT": {"dispatcher": "file, http"}} - mock_dispatcher.get = mock.MagicMock(return_value=[mock.MagicMock(), - mock.MagicMock()]) + + dispatcher1 = mock.MagicMock() + dispatcher1.__dispatcher_type__ = 'file' + + dispatcher2 = mock.MagicMock() + dispatcher2.__dispatcher_type__ = 'http' + + mock_dispatcher.get = mock.MagicMock(return_value=[dispatcher1, + dispatcher2]) 
self.assertEqual(None, t._do_output(output_config, {})) - @mock.patch('yardstick.benchmark.core.task.Context') + @mock.patch.object(task, 'Context') def test_parse_networks_from_nodes(self, mock_context): nodes = { 'node1': { @@ -129,9 +123,9 @@ class TaskTestCase(unittest.TestCase): self.assertEqual(mock_context.get_network.call_count, expected_get_network_calls) self.assertDictEqual(networks, expected) - @mock.patch('yardstick.benchmark.core.task.Context') - @mock.patch('yardstick.benchmark.core.task.base_runner') - def test_run(self, mock_base_runner, mock_ctx): + @mock.patch.object(task, 'Context') + @mock.patch.object(task, 'base_runner') + def test_run(self, mock_base_runner, *args): scenario = { 'host': 'athena.demo', 'target': 'ares.demo', @@ -152,8 +146,8 @@ class TaskTestCase(unittest.TestCase): t._run([scenario], False, "yardstick.out") self.assertTrue(runner.run.called) - @mock.patch('yardstick.benchmark.core.task.os') - def test_check_precondition(self, mock_os): + @mock.patch.object(os, 'environ') + def test_check_precondition(self, mock_os_environ): cfg = { 'precondition': { 'installer_type': 'compass', @@ -163,7 +157,7 @@ class TaskTestCase(unittest.TestCase): } t = task.TaskParser('/opt') - mock_os.environ.get.side_effect = ['compass', + mock_os_environ.get.side_effect = ['compass', 'os-nosdn', 'huawei-pod1'] result = t._check_precondition(cfg) @@ -172,82 +166,75 @@ class TaskTestCase(unittest.TestCase): def test_parse_suite_no_constraint_no_args(self): SAMPLE_SCENARIO_PATH = "no_constraint_no_args_scenario_sample.yaml" t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH)) - with mock.patch('yardstick.benchmark.core.task.os.environ', + with mock.patch.object(os, 'environ', new={'NODE_NAME': 'huawei-pod1', 'INSTALLER_TYPE': 'compass'}): task_files, task_args, task_args_fnames = t.parse_suite() - print("files=%s, args=%s, fnames=%s" % (task_files, task_args, - task_args_fnames)) + self.assertEqual(task_files[0], self.change_to_abspath( 
'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')) self.assertEqual(task_files[1], self.change_to_abspath( 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')) - self.assertEqual(task_args[0], None) - self.assertEqual(task_args[1], None) - self.assertEqual(task_args_fnames[0], None) - self.assertEqual(task_args_fnames[1], None) - @mock.patch('yardstick.benchmark.core.task.os.environ') - def test_parse_suite_no_constraint_with_args(self, mock_environ): + self.assertIsNone(task_args[0]) + self.assertIsNone(task_args[1]) + self.assertIsNone(task_args_fnames[0]) + self.assertIsNone(task_args_fnames[1]) + + def test_parse_suite_no_constraint_with_args(self): SAMPLE_SCENARIO_PATH = "no_constraint_with_args_scenario_sample.yaml" t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH)) - with mock.patch('yardstick.benchmark.core.task.os.environ', + with mock.patch.object(os, 'environ', new={'NODE_NAME': 'huawei-pod1', 'INSTALLER_TYPE': 'compass'}): task_files, task_args, task_args_fnames = t.parse_suite() - print("files=%s, args=%s, fnames=%s" % (task_files, task_args, - task_args_fnames)) + self.assertEqual(task_files[0], self.change_to_abspath( 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')) self.assertEqual(task_files[1], self.change_to_abspath( 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')) - self.assertEqual(task_args[0], None) + self.assertIsNone(task_args[0]) self.assertEqual(task_args[1], '{"host": "node1.LF","target": "node2.LF"}') - self.assertEqual(task_args_fnames[0], None) - self.assertEqual(task_args_fnames[1], None) + self.assertIsNone(task_args_fnames[0]) + self.assertIsNone(task_args_fnames[1]) - @mock.patch('yardstick.benchmark.core.task.os.environ') - def test_parse_suite_with_constraint_no_args(self, mock_environ): + def test_parse_suite_with_constraint_no_args(self): SAMPLE_SCENARIO_PATH = "with_constraint_no_args_scenario_sample.yaml" t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH)) - with 
mock.patch('yardstick.benchmark.core.task.os.environ', + with mock.patch.object(os, 'environ', + new={'NODE_NAME': 'huawei-pod1', 'INSTALLER_TYPE': 'compass'}): task_files, task_args, task_args_fnames = t.parse_suite() - print("files=%s, args=%s, fnames=%s" % (task_files, task_args, - task_args_fnames)) self.assertEqual(task_files[0], self.change_to_abspath( 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')) self.assertEqual(task_files[1], self.change_to_abspath( 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')) - self.assertEqual(task_args[0], None) - self.assertEqual(task_args[1], None) - self.assertEqual(task_args_fnames[0], None) - self.assertEqual(task_args_fnames[1], None) + self.assertIsNone(task_args[0]) + self.assertIsNone(task_args[1]) + self.assertIsNone(task_args_fnames[0]) + self.assertIsNone(task_args_fnames[1]) - @mock.patch('yardstick.benchmark.core.task.os.environ') - def test_parse_suite_with_constraint_with_args(self, mock_environ): + def test_parse_suite_with_constraint_with_args(self): SAMPLE_SCENARIO_PATH = "with_constraint_with_args_scenario_sample.yaml" t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH)) - with mock.patch('yardstick.benchmark.core.task.os.environ', + with mock.patch.object(os, 'environ', + new={'NODE_NAME': 'huawei-pod1', 'INSTALLER_TYPE': 'compass'}): task_files, task_args, task_args_fnames = t.parse_suite() - print("files=%s, args=%s, fnames=%s" % (task_files, task_args, - task_args_fnames)) + self.assertEqual(task_files[0], self.change_to_abspath( 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')) self.assertEqual(task_files[1], self.change_to_abspath( 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')) - self.assertEqual(task_args[0], None) + self.assertIsNone(task_args[0]) self.assertEqual(task_args[1], '{"host": "node1.LF","target": "node2.LF"}') - self.assertEqual(task_args_fnames[0], None) - self.assertEqual(task_args_fnames[1], None) + self.assertIsNone(task_args_fnames[0]) + 
self.assertIsNone(task_args_fnames[1]) def test_parse_options(self): options = { 'openstack': { 'EXTERNAL_NETWORK': '$network' }, - 'ndoes': ['node1', '$node'], + 'nodes': ['node1', '$node'], 'host': '$host' } @@ -258,48 +245,50 @@ class TaskTestCase(unittest.TestCase): 'host': 'server.yardstick' } - idle_result = { + expected_result = { 'openstack': { 'EXTERNAL_NETWORK': 'ext-net' }, - 'ndoes': ['node1', 'node2'], + 'nodes': ['node1', 'node2'], 'host': 'server.yardstick' } actual_result = t._parse_options(options) - self.assertEqual(idle_result, actual_result) + self.assertEqual(expected_result, actual_result) + def test_change_server_name_host_str(self): scenario = {'host': 'demo'} suffix = '-8' task.change_server_name(scenario, suffix) - self.assertTrue(scenario['host'], 'demo-8') + self.assertEqual('demo-8', scenario['host']) def test_change_server_name_host_dict(self): scenario = {'host': {'name': 'demo'}} suffix = '-8' task.change_server_name(scenario, suffix) - self.assertTrue(scenario['host']['name'], 'demo-8') + self.assertEqual('demo-8', scenario['host']['name']) def test_change_server_name_target_str(self): scenario = {'target': 'demo'} suffix = '-8' task.change_server_name(scenario, suffix) - self.assertTrue(scenario['target'], 'demo-8') + self.assertEqual('demo-8', scenario['target']) def test_change_server_name_target_dict(self): scenario = {'target': {'name': 'demo'}} suffix = '-8' task.change_server_name(scenario, suffix) - self.assertTrue(scenario['target']['name'], 'demo-8') + self.assertEqual('demo-8', scenario['target']['name']) - @mock.patch('yardstick.benchmark.core.task.utils') - @mock.patch('yardstick.benchmark.core.task.logging') - def test_set_log(self, mock_logging, mock_utils): + @mock.patch('six.moves.builtins.open', side_effect=mock.mock_open()) + @mock.patch.object(task, 'utils') + @mock.patch('logging.root') + def test_set_log(self, mock_logging_root, *args): task_obj = task.Task() task_obj.task_id = 'task_id' task_obj._set_log() - 
self.assertTrue(mock_logging.root.addHandler.called) + mock_logging_root.addHandler.assert_called() def _get_file_abspath(self, filename): curr_path = os.path.dirname(os.path.abspath(__file__)) diff --git a/yardstick/tests/unit/benchmark/runner/test_base.py b/yardstick/tests/unit/benchmark/runner/test_base.py index 0fdc42347..59739c54f 100644 --- a/yardstick/tests/unit/benchmark/runner/test_base.py +++ b/yardstick/tests/unit/benchmark/runner/test_base.py @@ -11,6 +11,8 @@ import time import mock import unittest +from subprocess import CalledProcessError + from yardstick.benchmark.runners import base from yardstick.benchmark.runners import iteration @@ -20,19 +22,19 @@ class ActionTestCase(unittest.TestCase): @mock.patch("yardstick.benchmark.runners.base.subprocess") def test__execute_shell_command(self, mock_subprocess): - mock_subprocess.check_output.side_effect = Exception() + mock_subprocess.check_output.side_effect = CalledProcessError(-1, '') self.assertEqual(base._execute_shell_command("")[0], -1) @mock.patch("yardstick.benchmark.runners.base.subprocess") def test__single_action(self, mock_subprocess): - mock_subprocess.check_output.side_effect = Exception() + mock_subprocess.check_output.side_effect = CalledProcessError(-1, '') base._single_action(0, "echo", mock.MagicMock()) @mock.patch("yardstick.benchmark.runners.base.subprocess") def test__periodic_action(self, mock_subprocess): - mock_subprocess.check_output.side_effect = Exception() + mock_subprocess.check_output.side_effect = CalledProcessError(-1, '') base._periodic_action(0, "echo", mock.MagicMock()) @@ -40,7 +42,14 @@ class ActionTestCase(unittest.TestCase): class RunnerTestCase(unittest.TestCase): def setUp(self): - self.runner = iteration.IterationRunner({}) + config = { + 'output_config': { + 'DEFAULT': { + 'dispatcher': 'file' + } + } + } + self.runner = iteration.IterationRunner(config) @mock.patch("yardstick.benchmark.runners.iteration.multiprocessing") def test_get_output(self, *args): diff 
--git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py index 5f11713fa..aef99ee94 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py @@ -6,30 +6,44 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## + +from oslo_utils import uuidutils import unittest import mock -from yardstick.benchmark.scenarios.lib.delete_network import DeleteNetwork +import yardstick.common.openstack_utils as op_utils +from yardstick.benchmark.scenarios.lib import delete_network class DeleteNetworkTestCase(unittest.TestCase): - @mock.patch('yardstick.common.openstack_utils.get_neutron_client') - @mock.patch('yardstick.common.openstack_utils.delete_neutron_net') - def test_delete_network(self, mock_get_neutron_client, mock_delete_neutron_net): - options = { - 'network_id': '123-123-123' - } - args = {"options": options} - obj = DeleteNetwork(args, {}) - obj.run({}) - self.assertTrue(mock_get_neutron_client.called) - self.assertTrue(mock_delete_neutron_net.called) - - -def main(): - unittest.main() - - -if __name__ == '__main__': - main() + def setUp(self): + self._mock_delete_neutron_net = mock.patch.object( + op_utils, 'delete_neutron_net') + self.mock_delete_neutron_net = self._mock_delete_neutron_net.start() + self._mock_get_shade_client = mock.patch.object( + op_utils, 'get_shade_client') + self.mock_get_shade_client = self._mock_get_shade_client.start() + self._mock_log = mock.patch.object(delete_network, 'LOG') + self.mock_log = self._mock_log.start() + _uuid = uuidutils.generate_uuid() + self.args = {'options': {'network_id': _uuid}} + self._del_obj = delete_network.DeleteNetwork(self.args, mock.ANY) + + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + 
self._mock_delete_neutron_net.stop() + self._mock_get_shade_client.stop() + self._mock_log.stop() + + def test_run(self): + self.mock_delete_neutron_net.return_value = True + self.assertTrue(self._del_obj.run({})) + self.mock_log.info.assert_called_once_with( + "Delete network successful!") + + def test_run_fail(self): + self.mock_delete_neutron_net.return_value = False + self.assertFalse(self._del_obj.run({})) + self.mock_log.error.assert_called_once_with("Delete network failed!") diff --git a/yardstick/tests/unit/common/test_openstack_utils.py b/yardstick/tests/unit/common/test_openstack_utils.py index b685e63be..8a2f5f95b 100644 --- a/yardstick/tests/unit/common/test_openstack_utils.py +++ b/yardstick/tests/unit/common/test_openstack_utils.py @@ -11,6 +11,7 @@ from oslo_utils import uuidutils import unittest import mock +from shade import exc from yardstick.common import openstack_utils @@ -54,3 +55,31 @@ class GetNetworkIdTestCase(unittest.TestCase): output = openstack_utils.get_network_id(mock_shade_client, 'network_name') self.assertEqual(None, output) + + +class DeleteNeutronNetTestCase(unittest.TestCase): + + def setUp(self): + self.mock_shade_client = mock.Mock() + self.mock_shade_client.delete_network = mock.Mock() + + def test_delete_neutron_net(self): + self.mock_shade_client.delete_network.return_value = True + output = openstack_utils.delete_neutron_net(self.mock_shade_client, + 'network_id') + self.assertTrue(output) + + def test_delete_neutron_net_fail(self): + self.mock_shade_client.delete_network.return_value = False + output = openstack_utils.delete_neutron_net(self.mock_shade_client, + 'network_id') + self.assertFalse(output) + + @mock.patch.object(openstack_utils, 'log') + def test_delete_neutron_net_exception(self, mock_logger): + self.mock_shade_client.delete_network.side_effect = ( + exc.OpenStackCloudException('error message')) + output = openstack_utils.delete_neutron_net(self.mock_shade_client, + 'network_id') + self.assertFalse(output) + 
mock_logger.error.assert_called_once() diff --git a/yardstick/tests/unit/orchestrator/test_heat.py b/yardstick/tests/unit/orchestrator/test_heat.py index f53c9b78c..e0a353812 100644 --- a/yardstick/tests/unit/orchestrator/test_heat.py +++ b/yardstick/tests/unit/orchestrator/test_heat.py @@ -89,6 +89,18 @@ class HeatStackTestCase(unittest.TestCase): self.assertFalse(heat._DEPLOYED_STACKS) self.mock_stack_delete.assert_called_once_with(id, wait=True) + def test_delete_bug_in_shade(self): + id = uuidutils.generate_uuid() + self.heatstack._stack = FakeStack( + outputs=mock.Mock(), status=mock.Mock(), id=id) + heat._DEPLOYED_STACKS[id] = self.heatstack._stack + self.mock_stack_delete.side_effect = TypeError() + + ret = self.heatstack.delete(wait=True) + self.assertTrue(ret) + self.assertFalse(heat._DEPLOYED_STACKS) + self.mock_stack_delete.assert_called_once_with(id, wait=True) + class HeatTemplateTestCase(unittest.TestCase): |