66 files changed, 1398 insertions(+), 821 deletions(-)
diff --git a/ansible/install.yaml b/ansible/install.yaml index d1745798c..e93232d06 100644 --- a/ansible/install.yaml +++ b/ansible/install.yaml @@ -35,6 +35,7 @@ - install_yardstick - configure_uwsgi - configure_nginx + - configure_gui - download_trex - install_trex - configure_rabbitmq diff --git a/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 b/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 index c049daf84..044f42acb 100644 --- a/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 +++ b/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 @@ -1,7 +1,7 @@ [uwsgi] master = true debug = true -chdir = {{ yardstick_dir }}api +chdir = {{ yardstick_dir }}/api module = server plugins = python processes = 10 @@ -15,4 +15,4 @@ close-on-exec = 1 daemonize = {{ log_dir }}uwsgi.log socket = {{ socket_file }} {# If virtual environment, we need to add: - virtualenv = <virtual_env> #}
\ No newline at end of file + virtualenv = <virtual_env> #} diff --git a/ansible/roles/download_dpdk/defaults/main.yml b/ansible/roles/download_dpdk/defaults/main.yml index d548280f5..885eebf03 100644 --- a/ansible/roles/download_dpdk/defaults/main.yml +++ b/ansible/roles/download_dpdk/defaults/main.yml @@ -1,14 +1,18 @@ --- -dpdk_version: "17.02" -dpdk_url: "http://dpdk.org/browse/dpdk/snapshot/dpdk-{{ dpdk_version }}.tar.gz" +dpdk_version: "17.02.1" +dpdk_url: "http://fast.dpdk.org/rel/dpdk-{{ dpdk_version }}.tar.xz" dpdk_file: "{{ dpdk_url|basename }}" -dpdk_unarchive: "{{ dpdk_file|regex_replace('[.]tar[.]gz$', '') }}" +dpdk_unarchive: "{{ dpdk_file|regex_replace('[.]tar[.]xz$', '') }}" dpdk_dest: "{{ clone_dest }}/" -#Note DPDK 17.08 17.11 and 18.02 are currently unsupported due to prox build issues -dpdk_sha256s: - "16.07": "sha256:d876e4b2a7101f28e7e345d3c88e66afe877d15f0159c19c5bc5bc26b7b7d788" - "17.02": "sha256:b07b546e910095174bdb6152bb0d7ce057cc4b79aaa74771aeee4e8a7219fb38" - "17.05": "sha256:763bfb7e1765efcc949e79d645dc9f1ebd16591431ba0db5ce22becd928dcd0a" - "17.08": "sha256:3a08addbff45c636538514e9a5838fb91ea557661a4c071e03a9a6987d46e5b6" #unsupported - "17.11": "sha256:77a727bb3834549985f291409c9a77a1e8be1c9329ce4c3eb19a22d1461022e4" #unsupported - "18.02": "sha256:f1210310fd5f01a3babe3a09d9b3e5a9db791c2ec6ecfbf94ade9f893a0632b8" #unsupported + +#NOTE(ralonsoh): DPDK > 17.02 are currently unsupported due to prox build issues +dpdk_md5: + "16.07.2": "md5:4922ea2ec935b64ff5c191fec53344a6" + "16.11.7": "md5:c081d113dfd57633e3bc3ebc802691be" + "17.02.1": "md5:cbdf8b7a92ce934d47c38cbc9c20c54a" + "17.05": "md5:0a68c31cd6a6cabeed0a4331073e4c05" #Ubuntu 17.10 support + "17.05.2": "md5:37afc9ce410d8e6945a1beb173074003" #unsupported + "17.08.2": "md5:dd239a878c8c40cf482fdfe438f8d99c" #unsupported + "17.11.3": "md5:68ca84ac878011acf44e75d33b46f55b" #unsupported + "18.02.2": "md5:75ad6d39b513649744e49c9fcbbb9ca5" #unsupported + "18.05": "md5:9fc86367cd9407ff6a8dfea56c4eddc4" #unsupported diff --git a/ansible/roles/download_dpdk/tasks/main.yml b/ansible/roles/download_dpdk/tasks/main.yml index bcb5dde1a..bea3febed 100644 --- a/ansible/roles/download_dpdk/tasks/main.yml +++ b/ansible/roles/download_dpdk/tasks/main.yml @@ -25,7 +25,7 @@ url: "{{ dpdk_url }}" dest: "{{ dpdk_dest }}" validate_certs: False - checksum: "{{ dpdk_sha256s[dpdk_version] }}" + checksum: "{{ dpdk_md5[dpdk_version] }}" - unarchive: src: "{{ dpdk_dest }}/{{ dpdk_file }}" diff --git a/ansible/roles/download_samplevnfs/defaults/main.yml b/ansible/roles/download_samplevnfs/defaults/main.yml index e40eb67c0..c5e880e57 100644 --- a/ansible/roles/download_samplevnfs/defaults/main.yml +++ b/ansible/roles/download_samplevnfs/defaults/main.yml @@ -1,4 +1,4 @@ --- samplevnf_url: "https://git.opnfv.org/samplevnf" samplevnf_dest: "{{ clone_dest }}/samplevnf" -samplevnf_version: "stable/euphrates" +samplevnf_version: "stable/fraser" diff --git a/ansible/roles/install_yardstick/tasks/main.yml b/ansible/roles/install_yardstick/tasks/main.yml index ee1b83756..973b2b027 100644 --- a/ansible/roles/install_yardstick/tasks/main.yml +++ b/ansible/roles/install_yardstick/tasks/main.yml @@ -37,10 +37,39 @@ # name: pip # state: latest -- name: install yardstick without virtual environment - include_tasks: regular_install.yml +- name: Install Yardstick requirements (venv) + pip: + requirements: "{{ yardstick_dir }}/requirements.txt" + virtualenv: "{{ yardstick_dir }}/virtualenv" + async: 300 + poll: 0 + register: pip_installer + when: 
virtual_environment == True + +- name: Install Yardstick requirements + pip: + requirements: "{{ yardstick_dir }}/requirements.txt" + async: 300 + poll: 0 + register: pip_installer when: virtual_environment == False -- name: install yardstick with virtual environment - include_tasks: virtual_install.yml +- name: Check install Yardstick requirements + async_status: + jid: "{{ pip_installer.ansible_job_id }}" + register: job_result + until: job_result.finished + retries: 100 + +- name: Install Yardstick code (venv) + pip: + name: "{{ yardstick_dir }}/" + editable: True + virtualenv: "{{ yardstick_dir }}/virtualenv" when: virtual_environment == True + +- name: Install Yardstick code + pip: + name: "{{ yardstick_dir }}/" + editable: True + when: virtual_environment == False diff --git a/ansible/roles/install_yardstick/tasks/regular_install.yml b/ansible/roles/install_yardstick/tasks/regular_install.yml deleted file mode 100644 index cd0e86fb9..000000000 --- a/ansible/roles/install_yardstick/tasks/regular_install.yml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2018 Intel Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. ---- -- name: Install Yardstick requirements - pip: - requirements: "{{ yardstick_dir }}/requirements.txt" - -- name: Install Yardstick code - pip: - name: "." - extra_args: -e - chdir: "{{ yardstick_dir }}/" diff --git a/ansible/roles/install_yardstick/tasks/virtual_install.yml b/ansible/roles/install_yardstick/tasks/virtual_install.yml deleted file mode 100644 index 8545acbcb..000000000 --- a/ansible/roles/install_yardstick/tasks/virtual_install.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2018 Intel Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. ---- -- name: Install Yardstick requirements - pip: - requirements: "{{ yardstick_dir }}/requirements.txt" - virtualenv: "{{ yardstick_dir }}/virtualenv" - -- name: Install Yardstick code - pip: - name: "{{ yardstick_dir }}/." 
- extra_args: -e - virtualenv: "{{ yardstick_dir }}/virtualenv" - diff --git a/docker/Dockerfile b/docker/Dockerfile index 7f85cbd7f..097bc3c3f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -26,7 +26,7 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \ RUN apt-get update && apt-get install -y git python python-setuptools python-pip iputils-ping && apt-get -y autoremove && apt-get clean RUN easy_install -U setuptools==30.0.0 -RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2 +RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.12.0 python-heatclient==1.11.0 ansible==2.5.5 RUN mkdir -p ${REPOS_DIR} @@ -40,7 +40,7 @@ RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${Y RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR} RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${STORPERF_REPO_DIR} -RUN ansible-playbook -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml +RUN ansible-playbook -i ${YARDSTICK_REPO_DIR}/ansible/install-inventory.ini -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml RUN ${YARDSTICK_REPO_DIR}/docker/supervisor.sh diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch index 21095cbe3..ef41cba03 100644 --- a/docker/Dockerfile.aarch64.patch +++ b/docker/Dockerfile.aarch64.patch @@ -31,7 +31,7 @@ index 62ea0d0..f2f41771 100644 +RUN apt-get update && apt-get install -y git python python-setuptools python-pip iputils-ping && apt-get -y autoremove && \ + apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean RUN easy_install -U setuptools==30.0.0 - RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2 + RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.12.0 python-heatclient==1.11.0 ansible==2.5.5 @@ -48,8 +49,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf # nginx=5000, rabbitmq=5672 diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst index 7ea2616e4..daa4b8187 100644 --- a/docs/release/release-notes/release-notes.rst +++ b/docs/release/release-notes/release-notes.rst @@ -36,6 +36,12 @@ Version History | *Date* | *Version* | *Comment* | | | | | +-------------------+-----------+---------------------------------+ +| Jul 2, 2018 | 6.2.1 | Yardstick for Fraser release | +| | | | ++-------------------+-----------+---------------------------------+ +| Jun 29, 2018 | 6.2.0 | Yardstick for Fraser release | +| | | | ++-------------------+-----------+---------------------------------+ | May 25, 2018 | 6.1.0 | Yardstick for Fraser release | | | | | +-------------------+-----------+---------------------------------+ @@ -120,19 +126,19 @@ Release Data | **Project** | Yardstick | | | | +--------------------------------+-----------------------+ -| **Repo/tag** | yardstick/opnfv-6.1.0 | +| **Repo/tag** | yardstick/opnfv-6.2.0 | | | | +--------------------------------+-----------------------+ -| **Yardstick Docker image tag** | opnfv-6.1.0 | +| **Yardstick Docker image tag** | opnfv-6.2.0 | | | | +--------------------------------+-----------------------+ | **Release designation** | Fraser | | | | +--------------------------------+-----------------------+ -| **Release date** | May 25, 2018 | +| **Release date** | Jun 29, 2018 | | | | 
+--------------------------------+-----------------------+ -| **Purpose of the delivery** | OPNFV Fraser 6.1.0 | +| **Purpose of the delivery** | OPNFV Fraser 6.2.0 | | | | +--------------------------------+-----------------------+ @@ -151,7 +157,7 @@ Documents Software Deliverables --------------------- - - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-6.1.0) + - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-6.2.0) List of Contexts ^^^^^^^^^^^^^^^^ @@ -391,6 +397,110 @@ Known Issues/Faults Corrected Faults ---------------- +Fraser 6.2.1: + ++--------------------+--------------------------------------------------------------------------+ +| **JIRA REFERENCE** | **DESCRIPTION** | ++====================+==========================================================================+ +| YARDSTICK-1147 | Fix ansible scripts for running in container | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1157 | Bug Fix: correct the file path to build docker file | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1276 | Bugfix: docker build failed | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1280 | Bugfix: uwsgi config file yardstick.ini output error | ++--------------------+--------------------------------------------------------------------------+ + +Fraser 6.2.0: + ++--------------------+--------------------------------------------------------------------------+ +| **JIRA REFERENCE** | **DESCRIPTION** | ++====================+==========================================================================+ +| YARDSTICK-1246 | Update pmd/lcore mask for OVS-DPDK context | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-837 | Move tests: unit/network_services/{lib/,collector/,*.py} | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1144 | Correctly set PYTHONPATH in Dockerfile | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1205 | Set "cmd2" library to version 0.8.6 | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1204 | Bump oslo.messaging version to 5.36.0 | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1210 | Remove __init__ method overriding in HeatContextTestCase | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1189 | Error when adding SR-IOV interfaces in SR-IOV context | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1214 | Remove AnsibleCommon class method mock | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1159 | Add --hwlb options as a command line argument for SampleVNF | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1203 | Add scale out TCs with availability zone support | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1167 | Do not start collectd twice when SampleVNF is running on Baremetal | 
++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1188 | Add "host_name_separator" variable to Context class | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1112 | MQ startup process refactor | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1229 | Cleanup BaseMonitor unit tests | ++--------------------+--------------------------------------------------------------------------+ +| - | Configure ACL via static file | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1191 | Use TRex release v2.41 to support both x86 and aarch64 | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1106 | Add IxNetwork API Python Binding package | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1224 | Cleanup TestYardstickNSCli class | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1225 | Remove print out of logger exception in TestUtils | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1194 | Add "duration" parameter to test case definition | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1209 | Remove instantiated contexts in "test_task" | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1192 | Standalone XML machine type is not longer valid | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1197 | Refactor RFC2455 TRex traffic profile injection | ++--------------------+--------------------------------------------------------------------------+ +| - | Fix "os.path" mock problems during tests | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1218 | Refactor "utils.parse_ini_file" testing | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1179 | Start nginx and uwsgi servicies only in not container mode | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1177 | Install dependencies: bare-metal, standalone | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1126 | Migrate install.sh script to ansible | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1146 | Fix nsb_setup.sh script | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1247 | NSB setup inventory name changed | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1116 | Changed IxNextgen library load in IXIA RFC2544 traffic generator call. 
| ++--------------------+--------------------------------------------------------------------------+ +| - | Corrected scale-up command line arguments | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-878 | OpenStack client replacement | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1222 | Bugfix: HA kill process recovery has a conflict | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1139 | Add "os_cloud_config" as a new context flag parameter | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1255 | Extended Context class with get_physical_nodes functionality | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1244 | NSB NFVi BNG test fails to run - stops after one step | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1219 | Decrease Sampling interval | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1101 | NSB NFVi PROX BNG losing many packets | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1217 | Fix NSB NfVi support for 25 and 40Gbps | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1185 | NSB Topology fix for Prox 4 port test case | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-966 | Convert SLA asserts to raises | ++--------------------+--------------------------------------------------------------------------+ + Fraser 6.1.0: +--------------------+--------------------------------------------------------------------------+ diff --git a/docs/testing/developer/devguide/devguide.rst b/docs/testing/developer/devguide/devguide.rst index 04d5350be..dbe92b846 100755 --- a/docs/testing/developer/devguide/devguide.rst +++ b/docs/testing/developer/devguide/devguide.rst @@ -1,16 +1,42 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + Convention for heading levels in Yardstick documentation: + + ======= Heading 0 (reserved for the title in a document) + ------- Heading 1 + ~~~~~~~ Heading 2 + +++++++ Heading 3 + ''''''' Heading 4 + + Avoid deeper levels because they do not render well. + Introduction -============= +------------ -Yardstick is a project dealing with performance testing. Yardstick produces its own test cases but can also be considered as a framework to support feature project testing. +Yardstick is a project dealing with performance testing. Yardstick produces +its own test cases but can also be considered as a framework to support feature +project testing. -Yardstick developed a test API that can be used by any OPNFV project. 
Therefore there are many ways to contribute to Yardstick. +Yardstick developed a test API that can be used by any OPNFV project. Therefore +there are many ways to contribute to Yardstick. You can: * Develop new test cases * Review codes * Develop Yardstick API / framework -* Develop Yardstick grafana dashboards and Yardstick reporting page +* Develop Yardstick grafana dashboards and Yardstick reporting page * Write Yardstick documentation This developer guide describes how to interact with the Yardstick project. @@ -19,28 +45,30 @@ part is a list of “How to” to help you to join the Yardstick family whatever your field of interest is. Where can I find some help to start? --------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. _`user guide`: http://artifacts.opnfv.org/yardstick/danube/1.0/docs/stesting_user_userguide/index.html .. _`wiki page`: https://wiki.opnfv.org/display/yardstick/ This guide is made for you. You can have a look at the `user guide`_. There are also references on documentation, video tutorials, tips in the -project `wiki page`_. You can also directly contact us by mail with [Yardstick] prefix in the title at opnfv-tech-discuss@lists.opnfv.org or on the IRC chan #opnfv-yardstick. +project `wiki page`_. You can also directly contact us by mail with [Yardstick] +prefix in the subject at opnfv-tech-discuss@lists.opnfv.org or on the IRC chan +#opnfv-yardstick. Yardstick developer areas -========================== +------------------------- Yardstick framework --------------------- +~~~~~~~~~~~~~~~~~~~ -Yardstick can be considered as a framework. Yardstick is release as a docker +Yardstick can be considered as a framework. Yardstick is released as a docker file, including tools, scripts and a CLI to prepare the environement and run -tests. It simplifies the integration of external test suites in CI pipeline -and provide commodity tools to collect and display results. +tests. It simplifies the integration of external test suites in CI pipelines +and provides commodity tools to collect and display results. -Since Danube, test categories also known as tiers have been created to group +Since Danube, test categories (also known as tiers) have been created to group similar tests, provide consistant sub-lists and at the end optimize test duration for CI (see How To section). @@ -56,44 +84,54 @@ The tiers are: How Todos? -=========== +---------- How Yardstick works? ---------------------- +~~~~~~~~~~~~~~~~~~~~ The installation and configuration of the Yardstick is described in the `user guide`_. How to work with test cases? ----------------------------- - +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -**Sample Test cases** +Sample Test cases ++++++++++++++++++ -Yardstick provides many sample test cases which are located at "samples" directory of repo. +Yardstick provides many sample test cases which are located at ``samples`` directory of repo. -Sample test cases are designed as following goals: +Sample test cases are designed with the following goals: -1. Helping user better understand yardstick features(including new feature and new test capacity). +1. Helping user better understand Yardstick features (including new feature and + new test capacity). -2. Helping developer to debug his new feature and test case before it is offical released. +2. Helping developer to debug a new feature and test case before it is + offically released. -3. Helping other developers understand and verify the new patch before the patch merged. +3. 
Helping other developers understand and verify the new patch before the + patch is merged. -So developers should upload your sample test case as well when they are trying to upload a new patch which is about the yardstick new test case or new feature. +Developers should upload their sample test cases as well when they are +uploading a new patch which is about the Yardstick new test case or new feature. -**OPNFV Release Test cases** +OPNFV Release Test cases +++++++++++++++++++++++++ -OPNFV Release test cases which are located at "tests/opnfv/test_cases" of repo. -those test cases are runing by OPNFV CI jobs, It means those test cases should be more mature than sample test cases. -OPNFV scenario owners can select related test cases and add them into the test suites which is represent the scenario. +OPNFV Release test cases are located at ``yardstick/tests/opnfv/test_cases``. +These test cases are run by OPNFV CI jobs, which means these test cases should +be more mature than sample test cases. +OPNFV scenario owners can select related test cases and add them into the test +suites which represent their scenario. -**Test case Description File** +Test case Description File +++++++++++++++++++++++++++ This section will introduce the meaning of the Test case description file. -we will use ping.yaml as a example to show you how to understand the test case description file. -In this Yaml file, you can easily find it consists of two sections. One is “Scenarios”, the other is “Context”.:: +we will use ping.yaml as a example to show you how to understand the test case +description file. +This ``yaml`` file consists of two sections. One is ``scenarios``, the other +is ``context``.:: --- # Sample benchmark task config file @@ -150,18 +188,32 @@ In this Yaml file, you can easily find it consists of two sections. One is “Sc {% endif %} -"Contexts" section is the description of pre-condition of testing. As ping.yaml shown, you can configure the image, flavor , name ,affinity and network of Test VM(servers), with this section, you will get a pre-condition env for Testing. -Yardstick will automatic setup the stack which are described in this section. -In fact, yardstick use convert this section to heat template and setup the VMs by heat-client (Meanwhile, yardstick can support to convert this section to Kubernetes template to setup containers). - -Two Test VMs(athena and ares) are configured by keyword "servers". -"flavor" will determine how many vCPU, how much memory for test VMs. -As "yardstick-flavor" is a basic flavor which will be automatically created when you run command "yardstick env prepare". "yardstick-flavor" is "1 vCPU 1G RAM,3G Disk". -"image" is the image name of test VMs. if you use cirros.3.5.0, you need fill the username of this image into "user". the "policy" of placement of Test VMs have two values (affinity and availability). -"availability" means anti-affinity. In "network" section, you can configure which provide network and physical_network you want Test VMs use. -you may need to configure segmentation_id when your network is vlan. - -Moreover, you can configure your specific flavor as below, yardstick will setup the stack for you. :: +The ``contexts`` section is the description of pre-condition of testing. As +``ping.yaml`` shows, you can configure the image, flavor, name, affinity and +network of Test VM (servers), with this section, you will get a pre-condition +env for Testing. +Yardstick will automatically setup the stack which are described in this +section. 
+Yardstick converts this section to heat template and sets up the VMs with +heat-client (Yardstick can also support to convert this section to Kubernetes +template to setup containers). + +In the examples above, two Test VMs (athena and ares) are configured by +keyword ``servers``. +``flavor`` will determine how many vCPU, how much memory for test VMs. +As ``yardstick-flavor`` is a basic flavor which will be automatically created +when you run command ``yardstick env prepare``. ``yardstick-flavor`` is +``1 vCPU 1G RAM,3G Disk``. +``image`` is the image name of test VMs. If you use ``cirros.3.5.0``, you need +fill the username of this image into ``user``. +The ``policy`` of placement of Test VMs have two values (``affinity`` and +``availability``). ``availability`` means anti-affinity. +In the ``network`` section, you can configure which ``provider`` network and +``physical_network`` you want Test VMs to use. +You may need to configure ``segmentation_id`` when your network is vlan. + +Moreover, you can configure your specific flavor as below, Yardstick will setup +the stack for you. :: flavor: name: yardstick-new-flavor @@ -170,7 +222,8 @@ Moreover, you can configure your specific flavor as below, yardstick will setup disk: 2 -Besides default heat stack, yardstick also allow you to setup other two types stack. they are "Node" and "Kubernetes". :: +Besides default ``Heat`` context, Yardstick also allows you to setup two other +types of context. They are ``Node`` and ``Kubernetes``. :: context: type: Kubernetes @@ -183,48 +236,64 @@ and :: name: LF +The ``scenarios`` section is the description of testing steps, you can +orchestrate the complex testing step through scenarios. -"Scenarios" section is the description of testing step, you can orchestrate the complex testing step through orchestrate scenarios. +Each scenario will do one testing step. +In one scenario, you can configure the type of scenario (operation), ``runner`` +type and ``sla`` of the scenario. -Each scenario will do one testing step, In one scenario, you can configure the type of scenario(operation), runner type and SLA of the scenario. +For TC002, We only have one step, which is Ping from host VM to target VM. In +this step, we also have some detailed operations implemented (such as ssh to +VM, ping from VM1 to VM2. Get the latency, verify the SLA, report the result). -For TC002, We only have one step , that is Ping from host VM to target VM. In this step, we also have some detail operation implement ( such as ssh to VM, ping from VM1 to VM2. Get the latency, verify the SLA, report the result). +If you want to get this implementation details implement, you can check with +the scenario.py file. For Ping scenario, you can find it in Yardstick repo +(``yardstick/yardstick/benchmark/scenarios/networking/ping.py``). -If you want to get this detail implement , you can check with the scenario.py file. For Ping scenario, you can find it in yardstick repo ( yardstick / yardstick / benchmark / scenarios / networking / ping.py) +After you select the type of scenario (such as Ping), you will select one type +of ``runner``, there are 4 types of runner. ``Iteration`` and ``Duration`` are +the most commonly used, and the default is ``Iteration``. -after you select the type of scenario( such as Ping), you will select one type of runner, there are 4 types of runner. Usually, we use the "Iteration" and "Duration". and Default is "Iteration". -For Iteration, you can specify the iteration number and interval of iteration. 
:: +For ``Iteration``, you can specify the iteration number and interval of iteration. :: runner: type: Iteration iterations: 10 interval: 1 -That means yardstick will iterate the 10 times of Ping test and the interval of each iteration is one second. +That means Yardstick will repeat the Ping test 10 times and the interval of +each iteration is one second. -For Duration, you can specify the duration of this scenario and the interval of each ping test. :: +For ``Duration``, you can specify the duration of this scenario and the +interval of each ping test. :: runner: type: Duration duration: 60 interval: 10 -That means yardstick will run the ping test as loop until the total time of this scenario reach the 60s and the interval of each loop is ten seconds. - +That means Yardstick will run the ping test as loop until the total time of +this scenario reaches 60s and the interval of each loop is ten seconds. -SLA is the criterion of this scenario. that depends on the scenario. different scenario can have different SLA metric. +SLA is the criterion of this scenario. This depends on the scenario. Different +scenarios can have different SLA metric. -**How to write a new test case** -Yardstick already provide a library of testing step. that means yardstick provide lots of type scenario. +How to write a new test case +++++++++++++++++++++++++++++ -Basiclly, What you need to do is to orchestrate the scenario from the library. +Yardstick already provides a library of testing steps (i.e. different types of +scenario). -Here, We will show two cases. One is how to write a simple test case, the other is how to write a quite complex test case. +Basically, what you need to do is to orchestrate the scenario from the library. +Here, we will show two cases. One is how to write a simple test case, the other +is how to write a quite complex test case. Write a new simple test case +'''''''''''''''''''''''''''' First, you can image a basic test case description as below. @@ -314,7 +383,7 @@ First, you can image a basic test case description as below. TODO How can I contribute to Yardstick? ------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you are already a contributor of any OPNFV project, you can contribute to Yardstick. If you are totally new to OPNFV, you must first create your Linux @@ -329,7 +398,7 @@ We distinguish 2 levels of contributors: Yardstick commitors are promoted by the Yardstick contributors. Gerrit & JIRA introduction -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +++++++++++++++++++++++++++ .. _Gerrit: https://www.gerritcodereview.com/ .. _`OPNFV Gerrit`: http://gerrit.opnfv.org/ @@ -338,7 +407,8 @@ Gerrit & JIRA introduction OPNFV uses Gerrit_ for web based code review and repository management for the Git Version Control System. You can access `OPNFV Gerrit`_. Please note that -you need to have Linux Foundation ID in order to use OPNFV Gerrit. You can get one from this link_. +you need to have Linux Foundation ID in order to use OPNFV Gerrit. You can get +one from this link_. OPNFV uses JIRA_ for issue management. An important principle of change management is to have two-way trace-ability between issue management @@ -350,14 +420,16 @@ If you want to contribute to Yardstick, you can pick a issue from Yardstick's JIRA dashboard or you can create you own issue and submit it to JIRA. Install Git and Git-reviews -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++++++++++++++++++++++++++++ Installing and configuring Git and Git-Review is necessary in order to submit -code to Gerrit. 
The `Getting to the code <https://wiki.opnfv.org/display/DEV/Developer+Getting+Started>`_ page will provide you with some help for that. +code to Gerrit. The +`Getting to the code <https://wiki.opnfv.org/display/DEV/Developer+Getting+Started>`_ +page will provide you with some help for that. Verify your patch locally before submitting -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++++++++++++++++++++++++++++++++++++++++++++ Once you finish a patch, you can submit it to Gerrit for code review. A developer sends a new patch to Gerrit will trigger patch verify job on Jenkins @@ -366,7 +438,8 @@ code coverage test. Before you submit your patch, it is recommended to run the patch verification in your local environment first. Open a terminal window and set the project's directory to the working -directory using the ``cd`` command. Assume that ``YARDSTICK_REPO_DIR`` is the path to the Yardstick project folder on your computer:: +directory using the ``cd`` command. Assume that ``YARDSTICK_REPO_DIR`` is the +path to the Yardstick project folder on your computer:: cd $YARDSTICK_REPO_DIR @@ -377,7 +450,7 @@ Verify your patch:: It is used in CI but also by the CLI. Submit the code with Git -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +++++++++++++++++++++++++ Tell Git which files you would like to take into account for the next commit. This is called 'staging' the files, by placing them into the staging area, @@ -417,7 +490,7 @@ to the commits, and eventually navigate among the latter more easily. `This document`_ happened to be very clear and useful to get started with that. Push the code to Gerrit for review -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +++++++++++++++++++++++++++++++++++ Now that the code has been comitted into your local Git repository the following step is to push it online to Gerrit for it to be reviewed. The @@ -432,27 +505,27 @@ Yardstick committers and contributors to review your codes. :width: 800px :alt: Gerrit for code review -You can find a list Yardstick people `here <https://wiki.opnfv.org/display/yardstick/People>`_, -or use the ``yardstick-reviewers`` and ``yardstick-committers`` groups in gerrit. +You can find a list Yardstick people +`here <https://wiki.opnfv.org/display/yardstick/People>`_, or use the +``yardstick-reviewers`` and ``yardstick-committers`` groups in gerrit. Modify the code under review in Gerrit -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +++++++++++++++++++++++++++++++++++++++ At the same time the code is being reviewed in Gerrit, you may need to edit it to make some changes and then send it back for review. The following steps go through the procedure. Once you have modified/edited your code files under your IDE, you will have to -stage them. The 'status' command is very helpful at this point as it provides -an overview of Git's current state:: +stage them. The ``git status`` command is very helpful at this point as it +provides an overview of Git's current state:: git status -The output of the command provides us with the files that have been modified -after the latest commit. +This command lists the files that have been modified since the last commit. You can now stage the files that have been modified as part of the Gerrit code -review edition/modification/improvement using ``git add`` command. It is now +review addition/modification/improvement using ``git add`` command. It is now time to commit the newly modified files, but the objective here is not to create a new commit, we simply want to inject the new changes into the previous commit. 
You can achieve that with the '--amend' option on the @@ -469,7 +542,8 @@ The final step consists in pushing the newly modified commit to Gerrit:: Plugins -========== +------- -For information about Yardstick plugins, refer to the chapter **Installing a plug-in into Yardstick** in the `user guide`_. +For information about Yardstick plugins, refer to the chapter +**Installing a plug-in into Yardstick** in the `user guide`_. diff --git a/docs/testing/developer/devguide/devguide_nsb_prox.rst b/docs/testing/developer/devguide/devguide_nsb_prox.rst index 22628413b..79990055a 100755 --- a/docs/testing/developer/devguide/devguide_nsb_prox.rst +++ b/docs/testing/developer/devguide/devguide_nsb_prox.rst @@ -244,10 +244,13 @@ Now let's examine the components of the file in detail 3. ``nodes`` - This names the Traffic Generator and the System under Test. Does not need to change. -4. ``prox_path`` - Location of the Prox executable on the traffic +4. ``interface_speed_gbps`` - This is an optional parameter. If not present + the system defaults to 10Gbps. This defines the speed of the interfaces. + +5. ``prox_path`` - Location of the Prox executable on the traffic generator (Either baremetal or Openstack Virtual Machine) -5. ``prox_config`` - This is the ``SUT Config File``. +6. ``prox_config`` - This is the ``SUT Config File``. In this case it is ``handle_l2fwd-2.cfg`` A number of additional parameters can be added. This example @@ -285,16 +288,31 @@ Now let's examine the components of the file in detail of a file called ``parameters.lua``, which contains information retrieved from either the hardware or the openstack configuration. -6. ``prox_args`` - this specifies the command line arguments to start +7. ``prox_args`` - this specifies the command line arguments to start prox. See `prox command line`_. -7. ``prox_config`` - This specifies the Traffic Generator config file. +8. ``prox_config`` - This specifies the Traffic Generator config file. + +9. ``runner`` - This is set to ``ProxDuration`` - This specifies that the + test runs for a set duration. Other runner types are available + but it is recommend to use ``ProxDuration`` + + The following parrameters are supported + + ``interval`` - (optional) - This specifies the sampling interval. + Default is 1 sec + + ``sampled`` - (optional) - This specifies if sampling information is + required. Default ``no`` + + ``duration`` - This is the length of the test in seconds. Default + is 60 seconds. -8. ``runner`` - This is set to ``Duration`` - This specified that the - test run for a set duration. Other runner types are available - but it is recommend to use ``Duration`` + ``confirmation`` - This specifies the number of confirmation retests to + be made before deciding to increase or decrease line speed. Default 0. + +10. ``context`` - This is ``context`` for a 2 port Baremetal configuration. -9. ``context`` - This is ``context`` for a 2 port Baremetal configuration. If a 4 port configuration was required then file ``prox-baremetal-4.yaml`` would be used. This is the NSB Prox baremetal configuration file. @@ -304,7 +322,8 @@ Now let's examine the components of the file in detail *Traffic Profile file* ---------------------- -This describes the details of the traffic flow. In this case ``prox_binsearch.yaml`` is used. +This describes the details of the traffic flow. In this case +``prox_binsearch.yaml`` is used. .. image:: images/PROX_Traffic_profile.png :width: 800px @@ -326,21 +345,29 @@ This describes the details of the traffic flow. 
In this case ``prox_binsearch.ya Custom traffic types can be created by creating a new traffic profile class. -3. ``tolerated_loss`` - This specifies the percentage of packets that can be lost/dropped before - we declare success or failure. Success is Transmitted-Packets from Traffic Generator is greater than or equal to +3. ``tolerated_loss`` - This specifies the percentage of packets that + can be lost/dropped before + we declare success or failure. Success is Transmitted-Packets from + Traffic Generator is greater than or equal to packets received by Traffic Generator plus tolerated loss. -4. ``test_precision`` - This specifies the precision of the test results. For some tests the success criteria - may never be achieved because the test precision may be greater than the successful throughput. For finer - results increase the precision by making this value smaller. +4. ``test_precision`` - This specifies the precision of the test + results. For some tests the success criteria may never be + achieved because the test precision may be greater than the + successful throughput. For finer results increase the precision + by making this value smaller. -5. ``packet_sizes`` - This specifies the range of packets size this test is run for. +5. ``packet_sizes`` - This specifies the range of packets size this + test is run for. -6. ``duration`` - This specifies the sample duration that the test uses to check for success or failure. +6. ``duration`` - This specifies the sample duration that the test + uses to check for success or failure. -7. ``lower_bound`` - This specifies the test initial lower bound sample rate. On success this value is increased. +7. ``lower_bound`` - This specifies the test initial lower bound sample rate. + On success this value is increased. -8. ``upper_bound`` - This specifies the test initial upper bound sample rate. On success this value is decreased. +8. ``upper_bound`` - This specifies the test initial upper bound sample rate. + On success this value is decreased. Other traffic profiles exist eg prox_ACL.yaml which does not compare what is received with what is transmitted. It just @@ -371,14 +398,18 @@ See this prox_vpe.yaml as example:: We will use ``tc_prox_heat_context_l2fwd-2.yaml`` as a example to show you how to understand the test description file. -.. image:: images/PROX_Test_HEAT_Script.png +.. image:: images/PROX_Test_HEAT_Script1.png :width: 800px - :alt: NSB PROX Test Description File + :alt: NSB PROX Test Description File - Part 1 + +.. image:: images/PROX_Test_HEAT_Script2.png + :width: 800px + :alt: NSB PROX Test Description File - Part 2 Now lets examine the components of the file in detail -Sections 1 to 8 are exactly the same in Baremetal and in Heat. Section -``9`` is replaced with sections A to F. Section 9 was for a baremetal +Sections 1 to 9 are exactly the same in Baremetal and in Heat. Section +``10`` is replaced with sections A to F. Section 10 was for a baremetal configuration file. This has no place in a heat configuration. A. ``image`` - yardstick-samplevnfs. This is the name of the image @@ -418,12 +449,12 @@ F. 
``networks`` - is composed of a management network labeled ``mgmt`` gateway_ip: 'null' port_security_enabled: False enable_dhcp: 'false' - downlink_1: + uplink_1: cidr: '10.0.4.0/24' gateway_ip: 'null' port_security_enabled: False enable_dhcp: 'false' - downlink_2: + downlink_1: cidr: '10.0.5.0/24' gateway_ip: 'null' port_security_enabled: False @@ -1033,7 +1064,7 @@ If PROX NSB does not work on baremetal, problem is either in network configurati 1. What is received on 0 is transmitted on 1, received on 1 transmitted on 0, received on 2 transmitted on 3 and received on 3 transmitted on 2. 2. No packets are Failed. - 3. No Packets are discarded. + 3. No packets are discarded. We can also dump the packets being received or transmitted via the following commands. :: @@ -1228,7 +1259,69 @@ Where 4) ir.intel.com = local no proxy +*How to Understand the Grafana output?* +--------------------------------------- + + .. image:: images/PROX_Grafana_1.png + :width: 1000px + :alt: NSB PROX Grafana_1 + + .. image:: images/PROX_Grafana_2.png + :width: 1000px + :alt: NSB PROX Grafana_2 + + .. image:: images/PROX_Grafana_3.png + :width: 1000px + :alt: NSB PROX Grafana_3 + + .. image:: images/PROX_Grafana_4.png + :width: 1000px + :alt: NSB PROX Grafana_4 + +A. Test Parameters - Test interval, Duartion, Tolerated Loss and Test Precision + +B. Overall No of packets send and received during test + +C. Generator Stats - packets sent, received and attempted by Generator + +D. Packets Size + +E. No of packets received by SUT + +F. No of packets forwarded by SUT + +G. This is the number of packets sent by the generator per port, for each interval. + +H. This is the number of packets received by the generator per port, for each interval. + +I. This is the number of packets send and received by the generator and lost by the SUT + that meet the success criteria + +J. This is the changes the Percentage of Line Rate used over a test, The MAX and the + MIN should converge to within the interval specified as the ``test-precision``. + +K. This is the packets Size supported during test. If "N/A" appears in any field the result has not been decided. + +L. This is the calculated throughput in MPPS(Million Packets Per second) for this line rate. + +M. This is the actual No, of packets sent by the generator in MPPS + +N. This is the actual No. of packets received by the generator in MPPS + +O. This is the total No. of packets sent by SUT. + +P. This is the total No. of packets received by the SUT + +Q. This is the total No. of packets dropped. (These packets were sent by the generator but not + received back by the generator, these may be dropped by the SUT or the Generator) + +R. This is the tolerated no of packets that can be dropped. + +S. This is the test Throughput in Gbps + +T. This is the Latencey per Port +U. 
This is the CPU Utilization diff --git a/docs/testing/developer/devguide/images/PROX_Grafana_1.png b/docs/testing/developer/devguide/images/PROX_Grafana_1.png Binary files differnew file mode 100644 index 000000000..d272edcf3 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Grafana_1.png diff --git a/docs/testing/developer/devguide/images/PROX_Grafana_2.png b/docs/testing/developer/devguide/images/PROX_Grafana_2.png Binary files differnew file mode 100644 index 000000000..4f7fd4cf5 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Grafana_2.png diff --git a/docs/testing/developer/devguide/images/PROX_Grafana_3.png b/docs/testing/developer/devguide/images/PROX_Grafana_3.png Binary files differnew file mode 100644 index 000000000..5ae967698 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Grafana_3.png diff --git a/docs/testing/developer/devguide/images/PROX_Grafana_4.png b/docs/testing/developer/devguide/images/PROX_Grafana_4.png Binary files differnew file mode 100644 index 000000000..5353d1c7e --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Grafana_4.png diff --git a/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png b/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png Binary files differindex 32530eb15..c09f7bb1b 100644 --- a/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png +++ b/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png diff --git a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png Binary files differdeleted file mode 100644 index 754973b4e..000000000 --- a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png +++ /dev/null diff --git a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script1.png b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script1.png Binary files differnew file mode 100644 index 000000000..bd375dba1 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script1.png diff --git a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script2.png b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script2.png Binary files differnew file mode 100644 index 000000000..99d9d24e6 --- /dev/null +++ b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script2.png diff --git a/docs/testing/developer/devguide/index.rst b/docs/testing/developer/devguide/index.rst index 92a18f6ee..9a76a32f1 100644 --- a/docs/testing/developer/devguide/index.rst +++ b/docs/testing/developer/devguide/index.rst @@ -14,3 +14,4 @@ Yardstick Developer Guide :numbered: devguide + devguide_nsb_prox diff --git a/nsb_setup.sh b/nsb_setup.sh index 3396b82d1..1f5344980 100755 --- a/nsb_setup.sh +++ b/nsb_setup.sh @@ -63,7 +63,7 @@ for i in "${pkg[@]}"; do fi done -pip install ansible==2.4.2 shade==1.22.2 docker-py==1.10.6 +pip install ansible==2.5.5 shade==1.22.2 docker-py==1.10.6 ANSIBLE_SCRIPTS="ansible" diff --git a/requirements.txt b/requirements.txt index 60014d75e..43d7120db 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ Babel==2.3.4 # BSD; OSI Approved BSD License Jinja2==2.9.6 # BSD; OSI Approved BSD License SQLAlchemy==1.1.12 # MIT License; OSI Approved MIT License PTable==0.9.2 # BSD (3 clause); OSI Approved BSD License -ansible==2.4.2 # GPLv3; OSI Approved GNU General Public License v3 or later (GPLv3+) +ansible==2.5.5 # GPLv3; OSI Approved GNU General Public License v3 or later (GPLv3+) backport-ipaddress==0.1; python_version 
<= "2.7" # OSI Approved Python Software Foundation License chainmap==1.0.2 # Python Software Foundation License; OSI Approved Python Software Foundation License cmd2==0.8.6 # MIT License; OSI Approved MIT License @@ -29,20 +29,20 @@ futures==3.1.1;python_version=='2.7' # BSD; OSI Approved BSD License influxdb==4.1.1 # MIT License; OSI Approved MIT License IxNetwork==8.40.1124.9 # MIT License; OSI Approved MIT License jinja2schema==0.1.4 # OSI Approved BSD License -keystoneauth1==3.1.0 # OSI Approved Apache Software License +keystoneauth1==3.3.0 # OSI Approved Apache Software License kubernetes==6.0.0 # OSI Approved Apache Software License mock==2.0.0 # OSI Approved BSD License; `BSD License`_; http://github.com/testing-cabal/mock/blob/master/LICENSE.txt msgpack-python==0.4.8 # OSI Approved Apache Software License netaddr==0.7.19 # BSD License; OSI Approved BSD License; OSI Approved MIT License netifaces==0.10.6 # MIT License; OSI Approved MIT License os-client-config==1.28.0 # OSI Approved Apache Software License -osc-lib==1.7.0 # OSI Approved Apache Software License -oslo.config==4.11.1 # OSI Approved Apache Software License +osc-lib==1.8.0 # OSI Approved Apache Software License +oslo.config==5.1.0 # OSI Approved Apache Software License oslo.i18n==3.17.0 # OSI Approved Apache Software License -oslo.messaging===5.36.0 # OSI Approved Apache Software License -oslo.privsep===1.22.1 # OSI Approved Apache Software License +oslo.messaging==5.36.0 # OSI Approved Apache Software License +oslo.privsep==1.23.0 # OSI Approved Apache Software License oslo.serialization==2.20.1 # OSI Approved Apache Software License -oslo.utils==3.28.0 # OSI Approved Apache Software License +oslo.utils==3.33.0 # OSI Approved Apache Software License paramiko==2.2.1 # LGPL; OSI Approved GNU Library or Lesser General Public License (LGPL) pbr==3.1.1 # OSI Approved Apache Software License; Apache License, Version 2.0 pika==0.10.0 # BSD; OSI Approved BSD License @@ -52,13 +52,13 @@ pycrypto==2.6.1 # Public Domain pyparsing==2.2.0 # MIT License; OSI Approved MIT License pyroute2==0.4.21 # dual license GPLv2+ and Apache v2; OSI Approved GNU General Public License v2 or later (GPLv2+); OSI Approved Apache Software License pyrsistent==0.14.1 # LICENSE.mit; OSI Approved MIT License -python-cinderclient==3.1.0 # OSI Approved Apache Software License +python-cinderclient==3.3.0 # OSI Approved Apache Software License python-glanceclient==2.8.0 # OSI Approved Apache Software License python-keystoneclient==3.13.0 # OSI Approved Apache Software License python-neutronclient==6.5.0 # OSI Approved Apache Software License python-novaclient==9.1.1 # OSI Approved Apache Software License pyzmq==16.0.2 # LGPL+BSD; OSI Approved GNU Library or Lesser General Public License (LGPL); OSI Approved BSD License -requests==2.11.1 # Apache 2.0; OSI Approved Apache Software License +requests==2.14.2 # Apache 2.0; OSI Approved Apache Software License requestsexceptions==1.3.0 # OSI Approved Apache Software License scp==0.10.2 # LGPL shade==1.22.2 # OSI Approved Apache Software License diff --git a/test-requirements.txt b/test-requirements.txt index 4828e98b0..7825cc5d2 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -14,8 +14,8 @@ testtools==2.3.0 # OSI Approved MIT License unittest2==1.1.0 # OSI Approved BSD License # NOTE(ralonsoh): to be removed, only for coverage support -python-heatclient==1.8.1 # OSI Approved Apache Software License +python-heatclient==1.11.0 # OSI Approved Apache Software License -# Yardstick F release <-> 
OpenStack Pike release -openstack_requirements==1.1.0 # OSI Approved Apache Software License --e git+https://github.com/openstack/requirements.git@stable/pike#egg=os_requirements +# Yardstick G release <-> OpenStack Queens release +openstack_requirements==1.2.0 # OSI Approved Apache Software License +-e git+https://github.com/openstack/requirements.git@stable/queens#egg=os_requirements diff --git a/tests/opnfv/test_suites/opnfv_os-odl-ovs-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-ovs-noha_daily.yaml new file mode 100644 index 000000000..83b370066 --- /dev/null +++ b/tests/opnfv/test_suites/opnfv_os-odl-ovs-noha_daily.yaml @@ -0,0 +1,64 @@ +############################################################################## +# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +# os-odl-ovs-noha daily task suite + +schema: "yardstick:suite:0.1" + +name: "os-odl-ovs-noha" +test_cases_dir: "tests/opnfv/test_cases/" +test_cases: +- + file_name: opnfv_yardstick_tc002.yaml +- + file_name: opnfv_yardstick_tc005.yaml +- + file_name: opnfv_yardstick_tc010.yaml +- + file_name: opnfv_yardstick_tc011.yaml + constraint: + installer: compass,fuel +- + file_name: opnfv_yardstick_tc012.yaml +- + file_name: opnfv_yardstick_tc014.yaml +- + file_name: opnfv_yardstick_tc037.yaml +- + file_name: opnfv_yardstick_tc055.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5"}' +- + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5"}' +- + file_name: opnfv_yardstick_tc069.yaml +- + file_name: opnfv_yardstick_tc070.yaml +- + file_name: opnfv_yardstick_tc071.yaml +- + file_name: opnfv_yardstick_tc072.yaml +- + file_name: opnfv_yardstick_tc075.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node1"}' diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py index 022e365e4..1c798fbb3 100644 --- a/yardstick/benchmark/contexts/base.py +++ b/yardstick/benchmark/contexts/base.py @@ -81,6 +81,9 @@ class Context(object): self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path) cfg = utils.read_yaml_file(self.file_path) + for node in cfg["nodes"]: + node["ctx_type"] = self.__context_type__ + self.nodes.extend(cfg["nodes"]) self.controllers.extend([node for node in cfg["nodes"] if node.get("role") == "Controller"]) diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py index 4d7c4f9be..eb62d6222 100644 --- a/yardstick/benchmark/scenarios/networking/vnf_generic.py +++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py @@ -23,6 +23,7 @@ import time import six import yaml +from yardstick.benchmark.contexts import base as context_base from yardstick.benchmark.scenarios import base as scenario_base from yardstick.common.constants import LOG_DIR from yardstick.common import exceptions @@ -36,6 +37,7 @@ 
from yardstick.network_services.traffic_profile import base as tprofile_base from yardstick.network_services.utils import get_nsb_option from yardstick import ssh + traffic_profile.register_modules() @@ -448,7 +450,7 @@ class NetworkServiceTestCase(scenario_base.Scenario): traffic_gen.listen_traffic(self.traffic_profile) # register collector with yardstick for KPI collection. - self.collector = Collector(self.vnfs) + self.collector = Collector(self.vnfs, context_base.Context.get_physical_nodes()) self.collector.start() # Start the actual traffic diff --git a/yardstick/network_services/collector/subscriber.py b/yardstick/network_services/collector/subscriber.py index 322b3f5a2..937c266a6 100644 --- a/yardstick/network_services/collector/subscriber.py +++ b/yardstick/network_services/collector/subscriber.py @@ -14,17 +14,36 @@ """This module implements stub for publishing results in yardstick format.""" import logging +from yardstick.network_services.nfvi.resource import ResourceProfile +from yardstick.network_services.utils import get_nsb_option + + LOG = logging.getLogger(__name__) class Collector(object): """Class that handles dictionary of results in yardstick-plot format.""" - def __init__(self, vnfs): + def __init__(self, vnfs, contexts_nodes, timeout=3600): super(Collector, self).__init__() self.vnfs = vnfs + self.nodes = contexts_nodes + self.bin_path = get_nsb_option('bin_path', '') + self.resource_profiles = {} + + for ctx_name, nodes in contexts_nodes.items(): + for node in (node for node in nodes if node.get('collectd')): + name = ".".join([node['name'], ctx_name]) + self.resource_profiles.update( + {name: ResourceProfile.make_from_node(node, timeout)} + ) def start(self): + for resource in self.resource_profiles.values(): + resource.initiate_systemagent(self.bin_path) + resource.start() + resource.amqp_process_for_nfvi_kpi() + for vnf in self.vnfs: vnf.start_collect() @@ -32,6 +51,9 @@ class Collector(object): for vnf in self.vnfs: vnf.stop_collect() + for resource in self.resource_profiles.values(): + resource.stop() + def get_kpi(self): """Returns dictionary of results in yardstick-plot format @@ -42,7 +64,12 @@ class Collector(object): for vnf in self.vnfs: # Result example: # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }} - LOG.debug("collect KPI for %s", vnf.name) + LOG.debug("collect KPI for vnf %s", vnf.name) results[vnf.name] = vnf.collect_kpi() + for node_name, resource in self.resource_profiles.items(): + LOG.debug("collect KPI for nfvi_node %s", node_name) + results[node_name] = {"core": resource.amqp_collect_nfvi_kpi()} + LOG.debug("%s collect KPIs %s", node_name, results[node_name]['core']) + return results diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py index 0c0bf223a..5922bd3b9 100644 --- a/yardstick/network_services/nfvi/resource.py +++ b/yardstick/network_services/nfvi/resource.py @@ -31,6 +31,7 @@ from yardstick.common.exceptions import ResourceCommandError from yardstick.common.task_template import finalize_for_yaml from yardstick.common.utils import validate_non_string_sequence from yardstick.network_services.nfvi.collectd import AmqpConsumer +from yardstick.benchmark.contexts import heat LOG = logging.getLogger(__name__) @@ -52,7 +53,8 @@ class ResourceProfile(object): DEFAULT_TIMEOUT = 3600 OVS_SOCKET_PATH = "/usr/local/var/run/openvswitch/db.sock" - def __init__(self, mgmt, port_names=None, plugins=None, interval=None, timeout=None): + def __init__(self, mgmt, port_names=None, 
plugins=None, + interval=None, timeout=None, reset_mq_flag=True): if plugins is None: self.plugins = {} @@ -77,6 +79,7 @@ class ResourceProfile(object): # we need to save mgmt so we can connect to port 5672 self.mgmt = mgmt self.connection = ssh.AutoConnectSSH.from_node(mgmt) + self._reset_mq_flag = reset_mq_flag @classmethod def make_from_node(cls, node, timeout): @@ -87,7 +90,10 @@ class ResourceProfile(object): plugins = collectd_options.get("plugins", {}) interval = collectd_options.get("interval") - return cls(node, plugins=plugins, interval=interval, timeout=timeout) + reset_mq_flag = (False if node.get("ctx_type") == heat.HeatContext.__context_type__ + else True) + return cls(node, plugins=plugins, interval=interval, + timeout=timeout, reset_mq_flag=reset_mq_flag) def check_if_system_agent_running(self, process): """ verify if system agent is running """ @@ -210,11 +216,14 @@ class ResourceProfile(object): if not self.enable: return {} + if self.check_if_system_agent_running("collectd")[0] != 0: + return {} + metric = {} while not self._queue.empty(): metric.update(self._queue.get()) - msg = self.parse_collectd_result(metric) - return msg + + return self.parse_collectd_result(metric) def _provide_config_file(self, config_file_path, nfvi_cfg, template_kwargs): template = pkg_resources.resource_string("yardstick.network_services.nfvi", @@ -250,7 +259,7 @@ class ResourceProfile(object): if status != 0: LOG.error("cannot find OVS socket %s", socket_path) - def _start_rabbitmq(self, connection): + def _reset_rabbitmq(self, connection): # Reset amqp queue LOG.debug("reset and setup amqp to collect data from collectd") # ensure collectd.conf.d exists to avoid error/warning @@ -263,10 +272,37 @@ class ResourceProfile(object): "sudo rabbitmqctl authenticate_user admin admin", "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'" ] + for cmd in cmd_list: - exit_status, stdout, stderr = connection.execute(cmd) - if exit_status != 0: - raise ResourceCommandError(command=cmd, stderr=stderr) + exit_status, _, stderr = connection.execute(cmd) + if exit_status != 0: + raise ResourceCommandError(command=cmd, stderr=stderr) + + def _check_rabbitmq_user(self, connection, user='admin'): + exit_status, stdout, _ = connection.execute("sudo rabbitmqctl list_users") + if exit_status == 0: + for line in stdout.split('\n')[1:]: + if line.split('\t')[0] == user: + return True + + def _set_rabbitmq_admin_user(self, connection): + LOG.debug("add admin user to amqp") + cmd_list = ["sudo rabbitmqctl add_user admin admin", + "sudo rabbitmqctl authenticate_user admin admin", + "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'" + ] + + for cmd in cmd_list: + exit_status, stdout, stderr = connection.execute(cmd) + if exit_status != 0: + raise ResourceCommandError(command=cmd, stdout=stdout, stderr=stderr) + + def _start_rabbitmq(self, connection): + if self._reset_mq_flag: + self._reset_rabbitmq(connection) + else: + if not self._check_rabbitmq_user(connection): + self._set_rabbitmq_admin_user(connection) # check stdout for "sudo rabbitmqctl status" command cmd = "sudo rabbitmqctl status" @@ -282,10 +318,11 @@ class ResourceProfile(object): self._prepare_collectd_conf(config_file_path) connection.execute('sudo pkill -x -9 collectd') - exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0] + cmd = "which %s > /dev/null 2>&1" % collectd_path + exit_status, _, stderr = connection.execute(cmd) if exit_status != 0: - LOG.warning("%s is not present disabling", collectd_path) - 
return + raise ResourceCommandError(command=cmd, stderr=stderr) + if "ovs_stats" in self.plugins: self._setup_ovs_stats(connection) @@ -293,8 +330,12 @@ class ResourceProfile(object): LOG.debug("Start collectd service..... %s second timeout", self.timeout) # intel_pmu plug requires large numbers of files open, so try to set # ulimit -n to a large value - connection.execute("sudo bash -c 'ulimit -n 1000000 ; %s'" % collectd_path, - timeout=self.timeout) + + cmd = "sudo bash -c 'ulimit -n 1000000 ; %s'" % collectd_path + exit_status, _, stderr = connection.execute(cmd, timeout=self.timeout) + if exit_status != 0: + raise ResourceCommandError(command=cmd, stderr=stderr) + LOG.debug("Done") def initiate_systemagent(self, bin_path): @@ -334,5 +375,7 @@ class ResourceProfile(object): if pid: self.connection.execute('sudo kill -9 "%s"' % pid) self.connection.execute('sudo pkill -9 "%s"' % agent) - self.connection.execute('sudo service rabbitmq-server stop') - self.connection.execute("sudo rabbitmqctl stop_app") + + if self._reset_mq_flag: + self.connection.execute('sudo service rabbitmq-server stop') + self.connection.execute("sudo rabbitmqctl stop_app") diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py index e105c2f55..39336785e 100644 --- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py +++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py @@ -25,6 +25,10 @@ class IXIARFC2544Profile(TrexProfile): UPLINK = 'uplink' DOWNLINK = 'downlink' + def __init__(self, yaml_data): + super(IXIARFC2544Profile, self).__init__(yaml_data) + self.rate = self.config.frame_rate + def _get_ixia_traffic_profile(self, profile_data, mac=None): if mac is None: mac = {} diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py index 366c5b26b..bc810ecb3 100644 --- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py @@ -21,6 +21,7 @@ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfS from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF from yardstick.network_services import constants +from yardstick.benchmark.contexts import base as context_base LOG = logging.getLogger(__name__) @@ -68,13 +69,19 @@ class ProxApproxVnf(SampleVNF): def collect_kpi(self): # we can't get KPIs if the VNF is down check_if_process_failed(self._vnf_process, 0.01) + + physical_node = context_base.Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) + + result = {"physical_node": physical_node} + if self.resource_helper is None: - result = { + result.update({ "packets_in": 0, "packets_dropped": 0, "packets_fwd": 0, "collect_stats": {"core": {}}, - } + }) return result if (self.tsc_hz == 0): @@ -102,14 +109,14 @@ class ProxApproxVnf(SampleVNF): tsc = tsc / port_count - result = { + result.update({ "packets_in": rx_total, "packets_dropped": max((tx_total - rx_total), 0), "packets_fwd": tx_total, # we share ProxResourceHelper with TG, but we want to collect # collectd KPIs here and not TG KPIs, so use a different method name "collect_stats": self.resource_helper.collect_collectd_kpi(), - } + }) try: curr_packets_in = int(((rx_total - self.prev_packets_in) * self.tsc_hz) / (tsc - self.prev_tsc)) diff --git 
a/yardstick/network_services/vnf_generic/vnf/router_vnf.py b/yardstick/network_services/vnf_generic/vnf/router_vnf.py index aea27ffa6..90b7b215e 100644 --- a/yardstick/network_services/vnf_generic/vnf/router_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/router_vnf.py @@ -47,7 +47,6 @@ class RouterVNF(SampleVNF): def instantiate(self, scenario_cfg, context_cfg): self.scenario_helper.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name]) self.configure_routes(self.name, scenario_cfg, context_cfg) def wait_for_instantiate(self): @@ -107,8 +106,11 @@ class RouterVNF(SampleVNF): stdout = self.ssh_helper.execute(ip_link_stats)[1] link_stats = self.get_stats(stdout) # get RX/TX from link_stats and assign to results + physical_node = Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) result = { + "physical_node": physical_node, "packets_in": 0, "packets_dropped": 0, "packets_fwd": 0, diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py index 3976acb76..1ee71aa25 100644 --- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py @@ -39,7 +39,7 @@ from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen from yardstick.network_services.vnf_generic.vnf.base import GenericVNF from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper - +from yardstick.benchmark.contexts.node import NodeContext LOG = logging.getLogger(__name__) @@ -319,6 +319,7 @@ class ResourceHelper(object): self.resource = None self.setup_helper = setup_helper self.ssh_helper = setup_helper.ssh_helper + self._enable = True def setup(self): self.resource = self.setup_helper.setup_vnf_environment() @@ -326,22 +327,33 @@ class ResourceHelper(object): def generate_cfg(self): pass + def update_from_context(self, context, attr_name): + """Disable resource helper in case of baremetal context. 
+ + And update appropriate node collectd options in context + """ + if isinstance(context, NodeContext): + self._enable = False + context.update_collectd_options_for_node(self.setup_helper.collectd_options, + attr_name) + def _collect_resource_kpi(self): result = {} status = self.resource.check_if_system_agent_running("collectd")[0] - if status == 0: + if status == 0 and self._enable: result = self.resource.amqp_collect_nfvi_kpi() result = {"core": result} return result def start_collect(self): - self.resource.initiate_systemagent(self.ssh_helper.bin_path) - self.resource.start() - self.resource.amqp_process_for_nfvi_kpi() + if self._enable: + self.resource.initiate_systemagent(self.ssh_helper.bin_path) + self.resource.start() + self.resource.amqp_process_for_nfvi_kpi() def stop_collect(self): - if self.resource: + if self.resource and self._enable: self.resource.stop() def collect_kpi(self): @@ -631,7 +643,6 @@ class SampleVNF(GenericVNF): self.resource_helper = resource_helper_type(self.setup_helper) self.context_cfg = None - self.nfvi_context = None self.pipeline_kwargs = {} self.uplink_ports = None self.downlink_ports = None @@ -658,8 +669,10 @@ class SampleVNF(GenericVNF): self._update_collectd_options(scenario_cfg, context_cfg) self.scenario_helper.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name]) - # self.nfvi_context = None + self.resource_helper.update_from_context( + Context.get_context_from_server(self.scenario_helper.nodes[self.name]), + self.scenario_helper.nodes[self.name] + ) # vnf deploy is unsupported, use ansible playbooks if self.scenario_helper.options.get("vnf_deploy", False): @@ -813,15 +826,18 @@ class SampleVNF(GenericVNF): check_if_process_failed(self._vnf_process, 0.01) stats = self.get_stats() m = re.search(self.COLLECT_KPI, stats, re.MULTILINE) + physical_node = Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) + + result = {"physical_node": physical_node} if m: - result = {k: int(m.group(v)) for k, v in self.COLLECT_MAP.items()} + result.update({k: int(m.group(v)) for k, v in self.COLLECT_MAP.items()}) result["collect_stats"] = self.resource_helper.collect_kpi() else: - result = { - "packets_in": 0, - "packets_fwd": 0, - "packets_dropped": 0, - } + result.update({"packets_in": 0, + "packets_fwd": 0, + "packets_dropped": 0}) + LOG.debug("%s collect KPIs %s", self.APP_NAME, result) return result @@ -867,6 +883,11 @@ class SampleVNFTrafficGen(GenericTrafficGen): def instantiate(self, scenario_cfg, context_cfg): self.scenario_helper.scenario_cfg = scenario_cfg + self.resource_helper.update_from_context( + Context.get_context_from_server(self.scenario_helper.nodes[self.name]), + self.scenario_helper.nodes[self.name] + ) + self.resource_helper.setup() # must generate_cfg after DPDK bind because we need port number self.resource_helper.generate_cfg() @@ -921,9 +942,14 @@ class SampleVNFTrafficGen(GenericTrafficGen): def collect_kpi(self): # check if the tg processes have exited + physical_node = Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) + + result = {"physical_node": physical_node} for proc in (self._tg_process, self._traffic_process): check_if_process_failed(proc) - result = self.resource_helper.collect_kpi() + + result["collect_stats"] = self.resource_helper.collect_kpi() LOG.debug("%s collect KPIs %s", self.APP_NAME, result) return result diff --git 
a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py index 02e7803f7..102c66f78 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py @@ -20,7 +20,7 @@ import os import shutil from collections import OrderedDict -from subprocess import call +import subprocess from yardstick.common import utils from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen @@ -101,7 +101,7 @@ class IxLoadResourceHelper(ClientResourceHelper): LOG.debug(cmd) if not os.path.ismount(self.RESULTS_MOUNT): - call(cmd, shell=True) + subprocess.call(cmd, shell=True) shutil.rmtree(self.RESULTS_MOUNT, ignore_errors=True) utils.makedirs(self.RESULTS_MOUNT) @@ -157,7 +157,7 @@ class IxLoadTrafficGen(SampleVNFTrafficGen): args="'%s'" % ixload_config) LOG.debug(cmd) - call(cmd, shell=True) + subprocess.call(cmd, shell=True) with open(self.ssh_helper.join_bin_path("ixLoad_HTTP_Client.csv")) as csv_file: lines = csv_file.readlines()[10:] @@ -172,5 +172,5 @@ class IxLoadTrafficGen(SampleVNFTrafficGen): self.resource_helper.data = self.resource_helper.make_aggregates() def terminate(self): - call(["pkill", "-9", "http_ixload.py"]) + subprocess.call(["pkill", "-9", "http_ixload.py"]) super(IxLoadTrafficGen, self).terminate() diff --git a/yardstick/network_services/vnf_generic/vnf/udp_replay.py b/yardstick/network_services/vnf_generic/vnf/udp_replay.py index a57f53bc7..fa92744d8 100644 --- a/yardstick/network_services/vnf_generic/vnf/udp_replay.py +++ b/yardstick/network_services/vnf_generic/vnf/udp_replay.py @@ -19,7 +19,7 @@ from yardstick.common.process import check_if_process_failed from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper - +from yardstick.benchmark.contexts import base as ctx_base LOG = logging.getLogger(__name__) @@ -79,9 +79,11 @@ class UdpReplayApproxVnf(SampleVNF): ports_mask_hex = hex(sum(2 ** num for num in port_nums)) # one core extra for master cpu_mask_hex = hex(2 ** (number_of_ports + 1) - 1) + nfvi_context = ctx_base.Context.get_context_from_server( + self.scenario_helper.nodes[self.name]) hw_csum = "" if (not self.scenario_helper.options.get('hw_csum', False) or - self.nfvi_context.attrs.get('nfvi_type') not in self.HW_OFFLOADING_NFVI_TYPES): + nfvi_context.attrs.get('nfvi_type') not in self.HW_OFFLOADING_NFVI_TYPES): hw_csum = '--no-hw-csum' # tuples of (FLD_PORT, FLD_QUEUE, FLD_LCORE) @@ -116,7 +118,12 @@ class UdpReplayApproxVnf(SampleVNF): stats = self.get_stats() stats_words = stats.split() split_stats = stats_words[stats_words.index('0'):][:number_of_ports * 5] + + physical_node = ctx_base.Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) + result = { + "physical_node": physical_node, "packets_in": get_sum(1), "packets_fwd": get_sum(2), "packets_dropped": get_sum(3) + get_sum(4), diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py index 9deef5cfa..bfff45c67 100644 --- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py @@ -28,6 +28,7 @@ from yardstick.common.process import check_if_process_failed from yardstick.network_services.helpers.samplevnf_helper import PortPairs from 
yardstick.network_services.pipeline import PipelineRules from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper +from yardstick.benchmark.contexts import base as ctx_base LOG = logging.getLogger(__name__) @@ -302,7 +303,11 @@ class VpeApproxVnf(SampleVNF): def collect_kpi(self): # we can't get KPIs if the VNF is down check_if_process_failed(self._vnf_process) + physical_node = ctx_base.Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) + result = { + "physical_node": physical_node, 'pkt_in_up_stream': 0, 'pkt_drop_up_stream': 0, 'pkt_in_down_stream': 0, diff --git a/yardstick/tests/unit/apiserver/utils/test_influx.py b/yardstick/tests/unit/apiserver/utils/test_influx.py index 95105d8ae..6021d35df 100644 --- a/yardstick/tests/unit/apiserver/utils/test_influx.py +++ b/yardstick/tests/unit/apiserver/utils/test_influx.py @@ -31,15 +31,17 @@ class GetDataDbClientTestCase(base.BaseUnitTestCase): _mock_parser.read.assert_called_once_with(constants.CONF_FILE) mock_get_client.assert_called_once_with(_mock_parser) + @mock.patch.object(influx.logger, 'error') @mock.patch.object(influx, '_get_influxdb_client', return_value='fake_client') @mock.patch.object(influx.ConfigParser, 'ConfigParser') - def test_get_data_db_client_parsing_error(self, mock_parser, - mock_get_client): + def test_get_data_db_client_parsing_error( + self, mock_parser, mock_get_client, *args): _mock_parser = mock.Mock() mock_parser.return_value = _mock_parser mock_parser.NoOptionError = configparser.NoOptionError - mock_get_client.side_effect = configparser.NoOptionError('option', 'section') + mock_get_client.side_effect = configparser.NoOptionError('option', + 'section') with self.assertRaises(configparser.NoOptionError): influx.get_data_db_client() diff --git a/yardstick/tests/unit/benchmark/contexts/test_base.py b/yardstick/tests/unit/benchmark/contexts/test_base.py index 8f1cbcc55..1e63b4831 100644 --- a/yardstick/tests/unit/benchmark/contexts/test_base.py +++ b/yardstick/tests/unit/benchmark/contexts/test_base.py @@ -25,6 +25,8 @@ from yardstick.common.constants import YARDSTICK_ROOT_PATH class DummyContextClass(Context): + __context_type__ = "Dummy" + def __init__(self, host_name_separator='.'): super(DummyContextClass, self).__init__\ (host_name_separator=host_name_separator) diff --git a/yardstick/tests/unit/benchmark/contexts/test_node.py b/yardstick/tests/unit/benchmark/contexts/test_node.py index 5d7b24c3d..7fd13a406 100644 --- a/yardstick/tests/unit/benchmark/contexts/test_node.py +++ b/yardstick/tests/unit/benchmark/contexts/test_node.py @@ -170,7 +170,7 @@ class NodeContextTestCase(unittest.TestCase): def test__get_physical_nodes(self): self.test_context.init(self.attrs) nodes = self.test_context._get_physical_nodes() - self.assertEquals(nodes, self.test_context.nodes) + self.assertEqual(nodes, self.test_context.nodes) def test__get_physical_node_for_server(self): self.test_context.init(self.attrs) @@ -198,7 +198,7 @@ class NodeContextTestCase(unittest.TestCase): node_collectd_options = [node for node in self.test_context.nodes if node['name'] == 'node1'][0]['collectd'] - self.assertEquals(node_collectd_options, options) + self.assertEqual(node_collectd_options, options) @mock.patch('{}.NodeContext._dispatch_script'.format(PREFIX)) def test_deploy(self, dispatch_script_mock): diff --git a/yardstick/tests/unit/benchmark/core/test_plugin.py b/yardstick/tests/unit/benchmark/core/test_plugin.py index 0d14e4e86..53621316b 100644 --- 
a/yardstick/tests/unit/benchmark/core/test_plugin.py +++ b/yardstick/tests/unit/benchmark/core/test_plugin.py @@ -12,6 +12,7 @@ import os import pkg_resources import mock +import six import testtools from yardstick import ssh @@ -48,13 +49,17 @@ deployment: self.mock_ssh_from_node.return_value = self.mock_ssh_obj self.mock_ssh_obj.wait = mock.Mock() self.mock_ssh_obj._put_file_shell = mock.Mock() + self._mock_log_info = mock.patch.object(plugin.LOG, 'info') + self.mock_log_info = self._mock_log_info.start() self.addCleanup(self._cleanup) def _cleanup(self): self._mock_ssh_from_node.stop() + self._mock_log_info.stop() - def test_install(self): + @mock.patch.object(six.moves.builtins, 'print') + def test_install(self, *args): args = mock.Mock() args.input_file = [mock.Mock()] with mock.patch.object(self.plugin, '_install_setup') as \ @@ -65,7 +70,8 @@ deployment: PluginTestCase.DEPLOYMENT) mock_run.assert_called_once_with(PluginTestCase.NAME) - def test_remove(self): + @mock.patch.object(six.moves.builtins, 'print') + def test_remove(self, *args): args = mock.Mock() args.input_file = [mock.Mock()] with mock.patch.object(self.plugin, '_remove_setup') as \ diff --git a/yardstick/tests/unit/benchmark/runner/test_base.py b/yardstick/tests/unit/benchmark/runner/test_base.py index 727207f5a..559c991f3 100644 --- a/yardstick/tests/unit/benchmark/runner/test_base.py +++ b/yardstick/tests/unit/benchmark/runner/test_base.py @@ -10,36 +10,40 @@ import time import mock -import unittest -from subprocess import CalledProcessError +import subprocess - -from yardstick.benchmark.runners import base +from yardstick.benchmark.runners import base as runner_base from yardstick.benchmark.runners import iteration +from yardstick.tests.unit import base as ut_base -class ActionTestCase(unittest.TestCase): +class ActionTestCase(ut_base.BaseUnitTestCase): - @mock.patch("yardstick.benchmark.runners.base.subprocess") - def test__execute_shell_command(self, mock_subprocess): - mock_subprocess.check_output.side_effect = CalledProcessError(-1, '') + def setUp(self): + self._mock_log = mock.patch.object(runner_base.log, 'error') + self.mock_log = self._mock_log.start() + self.addCleanup(self._stop_mocks) - self.assertEqual(base._execute_shell_command("")[0], -1) + def _stop_mocks(self): + self._mock_log.stop() - @mock.patch("yardstick.benchmark.runners.base.subprocess") - def test__single_action(self, mock_subprocess): - mock_subprocess.check_output.side_effect = CalledProcessError(-1, '') + @mock.patch.object(subprocess, 'check_output') + def test__execute_shell_command(self, mock_subprocess): + mock_subprocess.side_effect = subprocess.CalledProcessError(-1, '') + self.assertEqual(runner_base._execute_shell_command("")[0], -1) - base._single_action(0, "echo", mock.MagicMock()) + @mock.patch.object(subprocess, 'check_output') + def test__single_action(self, mock_subprocess): + mock_subprocess.side_effect = subprocess.CalledProcessError(-1, '') + runner_base._single_action(0, 'echo', mock.Mock()) - @mock.patch("yardstick.benchmark.runners.base.subprocess") + @mock.patch.object(subprocess, 'check_output') def test__periodic_action(self, mock_subprocess): - mock_subprocess.check_output.side_effect = CalledProcessError(-1, '') - - base._periodic_action(0, "echo", mock.MagicMock()) + mock_subprocess.side_effect = subprocess.CalledProcessError(-1, '') + runner_base._periodic_action(0, 'echo', mock.Mock()) -class RunnerTestCase(unittest.TestCase): +class RunnerTestCase(ut_base.BaseUnitTestCase): def setUp(self): config = { @@ 
-86,7 +90,7 @@ class RunnerTestCase(unittest.TestCase): self.assertEqual(idle_result, actual_result) def test__run_benchmark(self): - runner = base.Runner(mock.Mock()) + runner = runner_base.Runner(mock.Mock()) with self.assertRaises(NotImplementedError): runner._run_benchmark(mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()) diff --git a/yardstick/tests/unit/benchmark/runner/test_proxduration.py b/yardstick/tests/unit/benchmark/runner/test_proxduration.py index be1715aad..3299c5b05 100644 --- a/yardstick/tests/unit/benchmark/runner/test_proxduration.py +++ b/yardstick/tests/unit/benchmark/runner/test_proxduration.py @@ -1,23 +1,29 @@ -############################################################################## -# Copyright (c) 2018 Nokia and others. +# Copyright (c) 2018 Intel Corporation # -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import mock import unittest import multiprocessing import os -import time from yardstick.benchmark.runners import proxduration +from yardstick.common import constants from yardstick.common import exceptions as y_exc class ProxDurationRunnerTest(unittest.TestCase): + class MyMethod(object): SLA_VALIDATION_ERROR_SIDE_EFFECT = 1 BROAD_EXCEPTION_SIDE_EFFECT = 2 @@ -69,38 +75,37 @@ class ProxDurationRunnerTest(unittest.TestCase): @mock.patch.object(os, 'getpid') def test__worker_process_runner_id(self, mock_os_getpid): mock_os_getpid.return_value = 101 - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} - proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg, + {}, multiprocessing.Event(), mock.Mock()) - self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101) + self.assertEqual(101, self.scenario_cfg['runner']['runner_id']) def test__worker_process_called_with_cfg(self): - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} - proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg, + {}, multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() def test__worker_process_called_with_cfg_loop(self): - self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.01} - proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - 
multiprocessing.Event(), mock.Mock()) + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg, + {}, multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() self.assertGreater(self.benchmark.my_method.call_count, 2) def test__worker_process_called_without_cfg(self): scenario_cfg = {'runner': {}} - aborted = multiprocessing.Event() aborted.set() - - proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', - scenario_cfg, {}, aborted, mock.Mock()) + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', scenario_cfg, {}, + aborted, mock.Mock()) self.benchmark_cls.assert_called_once_with(scenario_cfg, {}) self.benchmark.setup.assert_called_once() @@ -108,188 +113,174 @@ class ProxDurationRunnerTest(unittest.TestCase): def test__worker_process_output_queue(self): self.benchmark.my_method = mock.Mock(return_value='my_result') - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} - output_queue = multiprocessing.Queue() - proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), output_queue) - time.sleep(0.1) + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} + output_queue = mock.Mock() + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg, + {}, multiprocessing.Event(), output_queue) self._assert_defaults__worker_run_setup_and_teardown() - self.assertEquals(output_queue.get(), 'my_result') + output_queue.put.assert_has_calls( + [mock.call('my_result', True, constants.QUEUE_PUT_TIMEOUT)]) def test__worker_process_output_queue_multiple_iterations(self): - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} self.benchmark.my_method = self.MyMethod() - - output_queue = multiprocessing.Queue() - proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), output_queue) - time.sleep(0.1) + output_queue = mock.Mock() + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg, + {}, multiprocessing.Event(), output_queue) self._assert_defaults__worker_run_setup_and_teardown() - self.assertGreater(self.benchmark.my_method.count, 103) - - count = 101 - while not output_queue.empty(): - count += 1 - self.assertEquals(output_queue.get(), count) + for idx in range(102, 101 + len(output_queue.method_calls)): + output_queue.put.assert_has_calls( + [mock.call(idx, True, constants.QUEUE_PUT_TIMEOUT)]) def test__worker_process_queue(self): self.benchmark.my_method = self.MyMethod() - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} - queue = multiprocessing.Queue() - timestamp = time.time() - proxduration._worker_process(queue, self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) - time.sleep(0.1) + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} + queue = mock.Mock() + proxduration._worker_process( + queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() - - result = queue.get() - self.assertGreater(result['timestamp'], timestamp) - self.assertEqual(result['errors'], '') - self.assertEqual(result['data'], {'my_key': 102}) - 
self.assertEqual(result['sequence'], 1) + benchmark_output = {'timestamp': mock.ANY, + 'sequence': 1, + 'data': {'my_key': 102}, + 'errors': ''} + queue.put.assert_has_calls( + [mock.call(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)]) def test__worker_process_queue_multiple_iterations(self): - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} self.benchmark.my_method = self.MyMethod() - - queue = multiprocessing.Queue() - timestamp = time.time() - proxduration._worker_process(queue, self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) - time.sleep(0.1) + queue = mock.Mock() + proxduration._worker_process( + queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() - self.assertGreater(self.benchmark.my_method.count, 103) - - count = 0 - while not queue.empty(): - count += 1 - result = queue.get() - self.assertGreater(result['timestamp'], timestamp) - self.assertEqual(result['errors'], '') - self.assertEqual(result['data'], {'my_key': count + 101}) - self.assertEqual(result['sequence'], count) + for idx in range(102, 101 + len(queue.method_calls)): + benchmark_output = {'timestamp': mock.ANY, + 'sequence': idx - 101, + 'data': {'my_key': idx}, + 'errors': ''} + queue.put.assert_has_calls( + [mock.call(benchmark_output, True, + constants.QUEUE_PUT_TIMEOUT)]) def test__worker_process_except_sla_validation_error_no_sla_cfg(self): self.benchmark.my_method = mock.Mock( side_effect=y_exc.SLAValidationError) - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} - proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg, + {}, multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() - def test__worker_process_except_sla_validation_error_sla_cfg_monitor(self): + @mock.patch.object(proxduration.LOG, 'warning') + def test__worker_process_except_sla_validation_error_sla_cfg_monitor( + self, *args): self.scenario_cfg['sla'] = {'action': 'monitor'} - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} self.benchmark.my_method = mock.Mock( side_effect=y_exc.SLAValidationError) - - proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg, + {}, multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() def test__worker_process_raise_sla_validation_error_sla_cfg_default(self): self.scenario_cfg['sla'] = {} - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} self.benchmark.my_method = mock.Mock( side_effect=y_exc.SLAValidationError) - with self.assertRaises(y_exc.SLAValidationError): - proxduration._worker_process(mock.Mock(), self.benchmark_cls, - 'my_method', self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', + self.scenario_cfg, 
{}, multiprocessing.Event(), mock.Mock()) self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {}) self.benchmark.setup.assert_called_once() self.benchmark.my_method.assert_called_once_with({}) def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self): - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} self.scenario_cfg['sla'] = {'action': 'assert'} self.benchmark.my_method = mock.Mock( side_effect=y_exc.SLAValidationError) with self.assertRaises(y_exc.SLAValidationError): - proxduration._worker_process(mock.Mock(), self.benchmark_cls, - 'my_method', self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', + self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock()) self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {}) self.benchmark.setup.assert_called_once() self.benchmark.my_method.assert_called_once_with({}) - def test__worker_process_queue_on_sla_validation_error_monitor(self): + @mock.patch.object(proxduration.LOG, 'warning') + def test__worker_process_queue_on_sla_validation_error_monitor( + self, *args): self.scenario_cfg['sla'] = {'action': 'monitor'} - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} self.benchmark.my_method = self.MyMethod( side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT) - - queue = multiprocessing.Queue() - timestamp = time.time() - proxduration._worker_process(queue, self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) - time.sleep(0.1) + queue = mock.Mock() + proxduration._worker_process( + queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() - - result = queue.get() - self.assertGreater(result['timestamp'], timestamp) - self.assertEqual(result['errors'], ('My Case SLA validation failed. ' - 'Error: my error message',)) - self.assertEqual(result['data'], {'my_key': 102}) - self.assertEqual(result['sequence'], 1) - - def test__worker_process_broad_exception(self): + benchmark_output = {'timestamp': mock.ANY, + 'sequence': 1, + 'data': {'my_key': 102}, + 'errors': ('My Case SLA validation failed. 
' + 'Error: my error message', )} + queue.put.assert_has_calls( + [mock.call(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)]) + + @mock.patch.object(proxduration.LOG, 'exception') + def test__worker_process_broad_exception(self, *args): self.benchmark.my_method = mock.Mock( side_effect=y_exc.YardstickException) - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} - proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', + self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() - def test__worker_process_queue_on_broad_exception(self): + @mock.patch.object(proxduration.LOG, 'exception') + def test__worker_process_queue_on_broad_exception(self, *args): self.benchmark.my_method = self.MyMethod( side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT) - - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} - queue = multiprocessing.Queue() - timestamp = time.time() - proxduration._worker_process(queue, self.benchmark_cls, 'my_method', - self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) - time.sleep(0.1) - - self._assert_defaults__worker_run_setup_and_teardown() - - result = queue.get() - self.assertGreater(result['timestamp'], timestamp) - self.assertNotEqual(result['errors'], '') - self.assertEqual(result['data'], {'my_key': 102}) - self.assertEqual(result['sequence'], 1) - - def test__worker_process_benchmark_teardown_on_broad_exception(self): + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} + queue = mock.Mock() + proxduration._worker_process( + queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) + + benchmark_output = {'timestamp': mock.ANY, + 'sequence': 1, + 'data': {'my_key': 102}, + 'errors': mock.ANY} + queue.put.assert_has_calls( + [mock.call(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)]) + + @mock.patch.object(proxduration.LOG, 'exception') + def test__worker_process_benchmark_teardown_on_broad_exception( + self, *args): self.benchmark.teardown = mock.Mock( side_effect=y_exc.YardstickException) - - self.scenario_cfg["runner"] = {"sampled": True, "duration": 1} + self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1} with self.assertRaises(SystemExit) as raised: - proxduration._worker_process(mock.Mock(), self.benchmark_cls, - 'my_method', self.scenario_cfg, {}, - multiprocessing.Event(), mock.Mock()) - self.assertEqual(raised.exception.code, 1) + proxduration._worker_process( + mock.Mock(), self.benchmark_cls, 'my_method', + self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock()) + self.assertEqual(1, raised.exception.code) self._assert_defaults__worker_run_setup_and_teardown() diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py index 2190e9337..5f342df7d 100644 --- a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py +++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py @@ -7,10 +7,6 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -# Unittest for yardstick.benchmark.scenarios.networking.iperf3.Iperf - -from __future__ import absolute_import - import os import 
unittest @@ -22,7 +18,7 @@ from yardstick.benchmark.scenarios.networking import iperf3 from yardstick.common import exceptions as y_exc -@mock.patch('yardstick.benchmark.scenarios.networking.iperf3.ssh') +@mock.patch.object(iperf3, 'ssh') class IperfTestCase(unittest.TestCase): output_name_tcp = 'iperf3_sample_output.json' output_name_udp = 'iperf3_sample_output_udp.json' @@ -41,9 +37,14 @@ class IperfTestCase(unittest.TestCase): 'ipaddr': '172.16.0.138', } } + self._mock_log_info = mock.patch.object(iperf3.LOG, 'info') + self.mock_log_info = self._mock_log_info.start() + self.addCleanup(self._stop_mocks) - def test_iperf_successful_setup(self, mock_ssh): + def _stop_mocks(self): + self._mock_log_info.stop() + def test_iperf_successful_setup(self, mock_ssh): p = iperf3.Iperf({}, self.ctx) mock_ssh.SSH.from_node().execute.return_value = (0, '', '') @@ -53,13 +54,11 @@ class IperfTestCase(unittest.TestCase): mock_ssh.SSH.from_node().execute.assert_called_with("iperf3 -s -D") def test_iperf_unsuccessful_setup(self, mock_ssh): - p = iperf3.Iperf({}, self.ctx) mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR') self.assertRaises(RuntimeError, p.setup) def test_iperf_successful_teardown(self, mock_ssh): - p = iperf3.Iperf({}, self.ctx) mock_ssh.SSH.from_node().execute.return_value = (0, '', '') p.host = mock_ssh.SSH.from_node() @@ -70,7 +69,6 @@ class IperfTestCase(unittest.TestCase): mock_ssh.SSH.from_node().execute.assert_called_with("pkill iperf3") def test_iperf_successful_no_sla(self, mock_ssh): - options = {} args = {'options': options} result = {} @@ -86,7 +84,6 @@ class IperfTestCase(unittest.TestCase): self.assertEqual(result, expected_result) def test_iperf_successful_sla(self, mock_ssh): - options = {} args = { 'options': options, @@ -105,7 +102,6 @@ class IperfTestCase(unittest.TestCase): self.assertEqual(result, expected_result) def test_iperf_unsuccessful_sla(self, mock_ssh): - options = {} args = { 'options': options, @@ -174,7 +170,6 @@ class IperfTestCase(unittest.TestCase): self.assertEqual(result, expected_result) def test_iperf_unsuccessful_script_error(self, mock_ssh): - options = {} args = {'options': options} result = {} @@ -186,7 +181,8 @@ class IperfTestCase(unittest.TestCase): mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR') self.assertRaises(RuntimeError, p.run, result) - def _read_sample_output(self, filename): + @staticmethod + def _read_sample_output(filename): curr_path = os.path.dirname(os.path.abspath(__file__)) output = os.path.join(curr_path, filename) with open(output) as f: diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py index c05d2ced2..db6f9cc89 100644 --- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py +++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py @@ -18,8 +18,8 @@ import time import mock import unittest -from yardstick.benchmark.scenarios.networking import vsperf_dpdk from yardstick import exceptions as y_exc +from yardstick.benchmark.scenarios.networking import vsperf_dpdk class VsperfDPDKTestCase(unittest.TestCase): @@ -59,17 +59,18 @@ class VsperfDPDKTestCase(unittest.TestCase): self.scenario = vsperf_dpdk.VsperfDPDK(self.args, self.ctx) - self._mock_ssh = mock.patch( - 'yardstick.benchmark.scenarios.networking.vsperf_dpdk.ssh') + self._mock_ssh = mock.patch.object(vsperf_dpdk, 'ssh') self.mock_ssh = self._mock_ssh.start() self._mock_subprocess_call = 
mock.patch.object(subprocess, 'call') self.mock_subprocess_call = self._mock_subprocess_call.start() - + self._mock_log_info = mock.patch.object(vsperf_dpdk.LOG, 'info') + self.mock_log_info = self._mock_log_info.start() self.addCleanup(self._cleanup) def _cleanup(self): self._mock_ssh.stop() self._mock_subprocess_call.stop() + self._mock_log_info.stop() def test_setup(self): # setup() specific mocks diff --git a/yardstick/tests/unit/common/test_openstack_utils.py b/yardstick/tests/unit/common/test_openstack_utils.py index d02a34d24..f6a0bdcc1 100644 --- a/yardstick/tests/unit/common/test_openstack_utils.py +++ b/yardstick/tests/unit/common/test_openstack_utils.py @@ -7,11 +7,13 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from oslo_utils import uuidutils -import unittest +import os + import mock +from oslo_utils import uuidutils import shade from shade import exc +import unittest from yardstick.common import constants from yardstick.common import openstack_utils @@ -28,11 +30,12 @@ class GetCredentialsTestCase(unittest.TestCase): class GetHeatApiVersionTestCase(unittest.TestCase): - def test_get_heat_api_version_check_result(self): + @mock.patch.object(openstack_utils, 'log') + def test_get_heat_api_version_check_result(self, *args): API = 'HEAT_API_VERSION' expected_result = '2' - with mock.patch.dict('os.environ', {API: '2'}, clear=True): + with mock.patch.dict(os.environ, {API: '2'}, clear=True): api_version = openstack_utils.get_heat_api_version() self.assertEqual(api_version, expected_result) @@ -277,6 +280,12 @@ class CreateSecurityGroupRuleTestCase(unittest.TestCase): self.mock_shade_client = mock.Mock() self.secgroup_name_or_id = 'sg_name_id' self.mock_shade_client.create_security_group_rule = mock.Mock() + self._mock_log = mock.patch.object(openstack_utils, 'log') + self.mock_log = self._mock_log.start() + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + self._mock_log.stop() def test_create_security_group_rule(self): self.mock_shade_client.create_security_group_rule.return_value = ( @@ -285,14 +294,13 @@ class CreateSecurityGroupRuleTestCase(unittest.TestCase): self.mock_shade_client, self.secgroup_name_or_id) self.assertTrue(output) - @mock.patch.object(openstack_utils, 'log') - def test_create_security_group_rule_exception(self, mock_logger): + def test_create_security_group_rule_exception(self): self.mock_shade_client.create_security_group_rule.side_effect = ( exc.OpenStackCloudException('error message')) output = openstack_utils.create_security_group_rule( self.mock_shade_client, self.secgroup_name_or_id) - mock_logger.error.assert_called_once() + self.mock_log.error.assert_called_once() self.assertFalse(output) @@ -321,6 +329,12 @@ class SecurityGroupTestCase(unittest.TestCase): self.sg_name = 'sg_name' self.sg_description = 'sg_description' self._uuid = uuidutils.generate_uuid() + self._mock_log = mock.patch.object(openstack_utils, 'log') + self.mock_log = self._mock_log.start() + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + self._mock_log.stop() def test_create_security_group_full_existing_security_group(self): self.mock_shade_client.get_security_group.return_value = ( @@ -330,21 +344,18 @@ class SecurityGroupTestCase(unittest.TestCase): self.mock_shade_client.get_security_group.assert_called_once() self.assertEqual(self._uuid, output) - @mock.patch.object(openstack_utils, 'log') - def test_create_security_group_full_non_existing_security_group( - self, 
mock_logger): + def test_create_security_group_full_non_existing_security_group(self): self.mock_shade_client.get_security_group.return_value = None self.mock_shade_client.create_security_group.side_effect = ( exc.OpenStackCloudException('error message')) output = openstack_utils.create_security_group_full( self.mock_shade_client, self.sg_name, self.sg_description) - mock_logger.error.assert_called_once() + self.mock_log.error.assert_called_once() self.assertIsNone(output) @mock.patch.object(openstack_utils, 'create_security_group_rule') - @mock.patch.object(openstack_utils, 'log') def test_create_security_group_full_create_rule_fail( - self, mock_logger, mock_create_security_group_rule): + self, mock_create_security_group_rule): self.mock_shade_client.get_security_group.return_value = None self.mock_shade_client.create_security_group.return_value = ( {'name': 'name', 'id': self._uuid}) @@ -353,7 +364,7 @@ class SecurityGroupTestCase(unittest.TestCase): self.mock_shade_client, self.sg_name, self.sg_description) mock_create_security_group_rule.assert_called() self.mock_shade_client.delete_security_group(self.sg_name) - mock_logger.error.assert_called_once() + self.mock_log.error.assert_called_once() self.assertIsNone(output) @mock.patch.object(openstack_utils, 'create_security_group_rule') @@ -369,10 +380,6 @@ class SecurityGroupTestCase(unittest.TestCase): self.mock_shade_client.delete_security_group(self.sg_name) self.assertEqual(self._uuid, output) -# ********************************************* -# NOVA -# ********************************************* - class CreateInstanceTestCase(unittest.TestCase): @@ -543,10 +550,6 @@ class GetFlavorTestCase(unittest.TestCase): mock_logger.error.assert_called_once() self.assertIsNone(output) -# ********************************************* -# CINDER -# ********************************************* - class GetVolumeIDTestCase(unittest.TestCase): @@ -664,22 +667,23 @@ class DetachVolumeTestCase(unittest.TestCase): self.assertFalse(output) -# ********************************************* -# GLANCE -# ********************************************* - class CreateImageTestCase(unittest.TestCase): def setUp(self): self.mock_shade_client = mock.Mock() self._uuid = uuidutils.generate_uuid() self.name = 'image_name' + self._mock_log = mock.patch.object(openstack_utils, 'log') + self.mock_log = self._mock_log.start() + self.addCleanup(self._stop_mock) - @mock.patch.object(openstack_utils, 'log') - def test_create_image_already_exit(self, mock_logger): + def _stop_mock(self): + self._mock_log.stop() + + def test_create_image_already_exit(self): self.mock_shade_client.get_image_id.return_value = self._uuid output = openstack_utils.create_image(self.mock_shade_client, self.name) - mock_logger.info.assert_called_once() + self.mock_log.info.assert_called_once() self.assertEqual(self._uuid, output) def test_create_image(self): @@ -688,15 +692,14 @@ class CreateImageTestCase(unittest.TestCase): output = openstack_utils.create_image(self.mock_shade_client, self.name) self.assertEqual(self._uuid, output) - @mock.patch.object(openstack_utils, 'log') - def test_create_image_exception(self, mock_logger): + def test_create_image_exception(self): self.mock_shade_client.get_image_id.return_value = None self.mock_shade_client.create_image.side_effect = ( exc.OpenStackCloudException('error message')) output = openstack_utils.create_image(self.mock_shade_client, self.name) - mock_logger.error.assert_called_once() + self.mock_log.error.assert_called_once() 
self.assertIsNone(output) diff --git a/yardstick/tests/unit/common/test_process.py b/yardstick/tests/unit/common/test_process.py index 1c6dfec27..e0933c6ac 100644 --- a/yardstick/tests/unit/common/test_process.py +++ b/yardstick/tests/unit/common/test_process.py @@ -90,10 +90,11 @@ class ExecuteTestCase(unittest.TestCase): additional_env=self.additional_env) self.assertEqual(self.stdout, out) - def test_execute_exception(self): + @mock.patch.object(process, 'LOG') + def test_execute_exception(self, *args): self.obj.returncode = self.RET_CODE_WRONG - self.assertRaises(exceptions.ProcessExecutionError, process.execute, - self.input_cmd, additional_env=self.additional_env) + with self.assertRaises(exceptions.ProcessExecutionError): + process.execute(self.input_cmd, additional_env=self.additional_env) self.obj.communicate.assert_called_once_with(None) def test_execute_with_extra_code(self): @@ -107,7 +108,8 @@ class ExecuteTestCase(unittest.TestCase): additional_env=self.additional_env) self.assertEqual(self.stdout, out) - def test_execute_exception_no_check(self): + @mock.patch.object(process, 'LOG') + def test_execute_exception_no_check(self, *args): self.obj.returncode = self.RET_CODE_WRONG out = process.execute(self.input_cmd, additional_env=self.additional_env, diff --git a/yardstick/tests/unit/network_services/collector/test_subscriber.py b/yardstick/tests/unit/network_services/collector/test_subscriber.py index 14e26f7fe..4271f852c 100644 --- a/yardstick/tests/unit/network_services/collector/test_subscriber.py +++ b/yardstick/tests/unit/network_services/collector/test_subscriber.py @@ -38,6 +38,15 @@ class MockVnfAprrox(object): class CollectorTestCase(unittest.TestCase): + NODES = {'context1': [{'name': 'node1', + 'ip': '1.2.3.4', + 'collectd': { + 'plugins': {'abc': 12, 'def': 34}, + 'interval': 987 + }, + }] + } + def setUp(self): vnf = MockVnfAprrox() vnf.start_collect = mock.Mock() @@ -47,30 +56,46 @@ class CollectorTestCase(unittest.TestCase): mock_instance = mock.Mock() mock_instance.execute.return_value = 0, '', '' mock_ssh.from_node.return_value = mock_instance - self.collector = subscriber.Collector([vnf]) + self.collector = subscriber.Collector([vnf], self.NODES) def tearDown(self): self.ssh_patch.stop() def test___init__(self, *_): vnf = MockVnfAprrox() - collector = subscriber.Collector([vnf]) + collector = subscriber.Collector([vnf], self.NODES) self.assertEqual(len(collector.vnfs), 1) + self.assertEqual(len(collector.nodes), 1) def test_start(self, *_): + resource_profile = mock.MagicMock() + self.collector.resource_profiles = {'key': resource_profile} + self.collector.bin_path = 'path' + self.assertIsNone(self.collector.start()) for vnf in self.collector.vnfs: vnf.start_collect.assert_called_once() + for resource_profile in self.collector.resource_profiles.values(): + resource_profile.initiate_systemagent.assert_called_once_with('path') + resource_profile.start.assert_called_once() + resource_profile.amqp_process_for_nfvi_kpi.assert_called_once() + def test_stop(self, *_): + resource_profile = mock.MagicMock() + self.collector.resource_profiles = {'key': resource_profile} + self.assertIsNone(self.collector.stop()) for vnf in self.collector.vnfs: vnf.stop_collect.assert_called_once() + for resource in self.collector.resource_profiles.values(): + resource.stop.assert_called_once() + def test_get_kpi(self, *_): result = self.collector.get_kpi() - self.assertEqual(1, len(result)) + self.assertEqual(2, len(result)) self.assertEqual(4, len(result["vnf__1"])) 
self.assertEqual(result["vnf__1"]["pkt_in_up_stream"], 100) self.assertEqual(result["vnf__1"]["pkt_drop_up_stream"], 5) diff --git a/yardstick/tests/unit/network_services/nfvi/test_resource.py b/yardstick/tests/unit/network_services/nfvi/test_resource.py index de9679456..c06658218 100644 --- a/yardstick/tests/unit/network_services/nfvi/test_resource.py +++ b/yardstick/tests/unit/network_services/nfvi/test_resource.py @@ -18,6 +18,7 @@ import mock import unittest from yardstick.common import exceptions +from yardstick.common.exceptions import ResourceCommandError from yardstick.network_services.nfvi.resource import ResourceProfile from yardstick.network_services.nfvi import resource, collectd @@ -139,18 +140,38 @@ class TestResourceProfile(unittest.TestCase): self.resource_profile._start_collectd(ssh_mock, "/opt/nsb_bin") ssh_mock.execute = mock.Mock(return_value=(1, "", "")) - self.assertIsNone(self.resource_profile._start_collectd(ssh_mock, - "/opt/nsb_bin")) + with self.assertRaises(ResourceCommandError): + self.resource_profile._start_collectd(ssh_mock, "/opt/nsb_bin") + + def test__reset_rabbitmq(self): + ssh_mock = mock.Mock() + ssh_mock.execute = mock.Mock(return_value=(1, "", "")) + with self.assertRaises(exceptions.ResourceCommandError): + self.resource_profile._reset_rabbitmq(ssh_mock) + + def test__check_rabbitmq_user(self): + ssh_mock = mock.Mock() + ssh_mock.execute = mock.Mock(return_value=(0, "title\nadmin\t[]", "")) + self.assertTrue(self.resource_profile._check_rabbitmq_user(ssh_mock)) + + def test__set_rabbitmq_admin_user(self): + ssh_mock = mock.Mock() + ssh_mock.execute = mock.Mock(return_value=(1, "", "")) + with self.assertRaises(exceptions.ResourceCommandError): + self.resource_profile._set_rabbitmq_admin_user(ssh_mock) def test__start_rabbitmq(self): ssh_mock = mock.Mock() - ssh_mock.execute = mock.Mock(return_value=(0, "RabbitMQ", "")) - self.assertIsNone(self.resource_profile._start_rabbitmq(ssh_mock)) + self.resource_profile._reset_rabbitmq = mock.Mock() + self.resource_profile._set_rabbitmq_admin_user = mock.Mock() - ssh_mock.execute = mock.Mock(return_value=(0, "", "")) + self.resource_profile._reset_mq_flag = True + ssh_mock.execute = mock.Mock(return_value=(1, "", "")) with self.assertRaises(exceptions.ResourceCommandError): self.resource_profile._start_rabbitmq(ssh_mock) + self.resource_profile._reset_mq_flag = False + self.resource_profile._check_rabbitmq_user = mock.Mock(return_value=False) ssh_mock.execute = mock.Mock(return_value=(1, "", "")) with self.assertRaises(exceptions.ResourceCommandError): self.resource_profile._start_rabbitmq(ssh_mock) diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py index 6b3532fa2..3bb8b9192 100644 --- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from copy import deepcopy +import copy import mock import unittest @@ -440,6 +440,12 @@ class TestIXIARFC2544Profile(unittest.TestCase): result = r_f_c2544_profile._get_ixia_traffic_profile(profile_data, mac) self.assertIsNotNone(result) + def test__init__(self): + t_profile_data = copy.deepcopy(self.TRAFFIC_PROFILE) + t_profile_data['traffic_profile']['frame_rate'] = 12345678 + r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(t_profile_data) + self.assertEqual(12345678, r_f_c2544_profile.rate) + def test__get_ixia_traffic_profile_default_args(self): r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile( self.TRAFFIC_PROFILE) @@ -521,7 +527,7 @@ class TestIXIARFC2544Profile(unittest.TestCase): traffic_generator.vnfd_helper.port_num.side_effect = ports_expected traffic_generator.client.return_value = True - traffic_profile = deepcopy(self.TRAFFIC_PROFILE) + traffic_profile = copy.deepcopy(self.TRAFFIC_PROFILE) traffic_profile.update({ "uplink_0": ["xe0"], "downlink_0": ["xe1", "xe2"], diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py index 036746e6b..c062308e8 100644 --- a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py @@ -11,23 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# import unittest import mock -from yardstick.tests import STL_MOCKS +from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxTestDataTuple +from yardstick.network_services.traffic_profile import prox_binsearch -STLClient = mock.MagicMock() -stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) -stl_patch.start() -if stl_patch: - from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxTestDataTuple - from yardstick.network_services.traffic_profile.prox_binsearch import ProxBinSearchProfile +class TestProxBinSearchProfile(unittest.TestCase): + def setUp(self): + self._mock_log_info = mock.patch.object(prox_binsearch.LOG, 'info') + self.mock_log_info = self._mock_log_info.start() + self.addCleanup(self._stop_mocks) -class TestProxBinSearchProfile(unittest.TestCase): + def _stop_mocks(self): + self._mock_log_info.stop() def test_execute_1(self): def target(*args, **_): @@ -60,7 +60,7 @@ class TestProxBinSearchProfile(unittest.TestCase): profile_helper = mock.MagicMock() profile_helper.run_test = target - profile = ProxBinSearchProfile(tp_config) + profile = prox_binsearch.ProxBinSearchProfile(tp_config) profile.init(mock.MagicMock()) profile._profile_helper = profile_helper @@ -138,7 +138,7 @@ class TestProxBinSearchProfile(unittest.TestCase): profile_helper = mock.MagicMock() profile_helper.run_test = target - profile = ProxBinSearchProfile(tp_config) + profile = prox_binsearch.ProxBinSearchProfile(tp_config) profile.init(mock.MagicMock()) profile._profile_helper = profile_helper @@ -173,7 +173,7 @@ class TestProxBinSearchProfile(unittest.TestCase): profile_helper = mock.MagicMock() profile_helper.run_test = target - profile = ProxBinSearchProfile(tp_config) + profile = prox_binsearch.ProxBinSearchProfile(tp_config) profile.init(mock.MagicMock()) profile._profile_helper = profile_helper @@ -227,7 +227,7 @@ class TestProxBinSearchProfile(unittest.TestCase): profile_helper = mock.MagicMock() profile_helper.run_test = target 
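The new resource-profile tests above follow one recipe: stub the SSH client's execute() to return a (status, stdout, stderr) tuple and check that a non-zero status is converted into ResourceCommandError via the assertRaises context manager. A stripped-down sketch of that recipe; start_service() is a hypothetical helper, not the Yardstick implementation:

import unittest
from unittest import mock


class ResourceCommandError(Exception):
    """Stand-in for yardstick.common.exceptions.ResourceCommandError."""


def start_service(ssh_client, cmd='sudo service foo start'):
    # Hypothetical helper mirroring the "raise on non-zero exit" convention.
    status, stdout, stderr = ssh_client.execute(cmd)
    if status != 0:
        raise ResourceCommandError(stderr)
    return stdout


class StartServiceTestCase(unittest.TestCase):
    def test_start_service_ok(self):
        ssh_mock = mock.Mock()
        ssh_mock.execute.return_value = (0, 'started', '')
        self.assertEqual('started', start_service(ssh_mock))

    def test_start_service_failure(self):
        ssh_mock = mock.Mock()
        ssh_mock.execute.return_value = (1, '', 'boom')
        with self.assertRaises(ResourceCommandError):
            start_service(ssh_mock)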
- profile = ProxBinSearchProfile(tp_config) + profile = prox_binsearch.ProxBinSearchProfile(tp_config) profile.init(mock.MagicMock()) profile._profile_helper = profile_helper diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/acl_1rule.yaml b/yardstick/tests/unit/network_services/vnf_generic/vnf/acl_1rule.yaml deleted file mode 100644 index b184a29e2..000000000 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/acl_1rule.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2016-2017 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -access-list1: - acl: - access-list-entries: - - ace: - ace-oper-data: - match-counter: 0 - actions: drop,count - matches: - destination-ipv4-network: 152.16.40.20/24 - destination-port-range: - lower-port: 0 - upper-port: 65535 - source-ipv4-network: 0.0.0.0/0 - source-port-range: - lower-port: 0 - upper-port: 65535 - rule-name: rule1588 - - ace: - ace-oper-data: - match-counter: 0 - actions: drop,count - matches: - destination-ipv4-network: 0.0.0.0/0 - destination-port-range: - lower-port: 0 - upper-port: 65535 - source-ipv4-network: 152.16.100.20/24 - source-port-range: - lower-port: 0 - upper-port: 65535 - rule-name: rule1589 - acl-name: sample-ipv4-acl - acl-type: ipv4-acl diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py index ce32a31e2..f04d2c617 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py @@ -23,6 +23,7 @@ from yardstick.tests import STL_MOCKS from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh from yardstick.common import utils from yardstick.common import exceptions +from yardstick.benchmark.contexts import base as ctx_base STLClient = mock.MagicMock() @@ -30,7 +31,7 @@ stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) stl_patch.start() if stl_patch: - from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxVnf + from yardstick.network_services.vnf_generic.vnf import acl_vnf from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper from yardstick.network_services.nfvi.resource import ResourceProfile from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxSetupEnvSetupEnvHelper @@ -146,7 +147,7 @@ class TestAclApproxVnf(unittest.TestCase): 'ip': '1.2.1.1', 'interfaces': {'xe0': {'local_iface_name': 'ens513f0', - 'vld_id': AclApproxVnf.DOWNLINK, + 'vld_id': acl_vnf.AclApproxVnf.DOWNLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.40.20', 'dst_mac': '00:00:00:00:00:01', @@ -174,7 +175,7 @@ class TestAclApproxVnf(unittest.TestCase): 'ip': '1.2.1.1', 'interfaces': {'xe0': {'local_iface_name': 'ens785f0', - 'vld_id': AclApproxVnf.UPLINK, + 'vld_id': acl_vnf.AclApproxVnf.UPLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.100.20', 'dst_mac': '00:00:00:00:00:02', @@ -199,7 +200,7 @@ class 
TestAclApproxVnf(unittest.TestCase): 'ip': '1.2.1.1', 'interfaces': {'xe0': {'local_iface_name': 'ens786f0', - 'vld_id': AclApproxVnf.UPLINK, + 'vld_id': acl_vnf.AclApproxVnf.UPLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.100.19', 'dst_mac': '00:00:00:00:00:04', @@ -209,7 +210,7 @@ class TestAclApproxVnf(unittest.TestCase): 'vpci': '0000:05:00.0', 'dpdk_port_num': 0}, 'xe1': {'local_iface_name': 'ens786f1', - 'vld_id': AclApproxVnf.DOWNLINK, + 'vld_id': acl_vnf.AclApproxVnf.DOWNLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.40.19', 'dst_mac': '00:00:00:00:00:03', @@ -245,22 +246,31 @@ class TestAclApproxVnf(unittest.TestCase): def test___init__(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = AclApproxVnf(name, vnfd) + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) self.assertIsNone(acl_approx_vnf._vnf_process) @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch(SSH_HELPER) def test_collect_kpi(self, ssh, *args): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = AclApproxVnf(name, vnfd) + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) + acl_approx_vnf.scenario_helper.scenario_cfg = { + 'nodes': {acl_approx_vnf.name: "mock"} + } acl_approx_vnf.q_in = mock.MagicMock() acl_approx_vnf.q_out = mock.MagicMock() acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0) acl_approx_vnf.resource = mock.Mock(autospec=ResourceProfile) acl_approx_vnf.vnf_execute = mock.Mock(return_value="") - result = {'packets_dropped': 0, 'packets_fwd': 0, 'packets_in': 0} + result = { + 'physical_node': 'mock_node', + 'packets_dropped': 0, + 'packets_fwd': 0, + 'packets_in': 0 + } self.assertEqual(result, acl_approx_vnf.collect_kpi()) @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") @@ -269,7 +279,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = AclApproxVnf(name, vnfd) + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf.q_in = mock.MagicMock() acl_approx_vnf.q_out = mock.MagicMock() acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -281,7 +291,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = AclApproxVnf(name, vnfd) + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf.q_in = mock.MagicMock() acl_approx_vnf.q_out = mock.MagicMock() acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -302,7 +312,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = AclApproxVnf(name, vnfd) + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf._build_config = mock.MagicMock() acl_approx_vnf.queue_wrapper = mock.MagicMock() acl_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg @@ -322,7 +332,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = AclApproxVnf(name, vnfd) + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf.deploy_helper = mock.MagicMock() acl_approx_vnf.resource_helper = mock.MagicMock() acl_approx_vnf._build_config = mock.MagicMock() @@ -340,7 +350,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = AclApproxVnf(name, 
vnfd) + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf._vnf_process = mock.MagicMock() acl_approx_vnf._vnf_process.terminate = mock.Mock() acl_approx_vnf.used_drivers = {"01:01.0": "i40e", diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py index 6b6f3ef34..635ca41a2 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py @@ -322,9 +322,13 @@ class TestCgnaptApproxVnf(unittest.TestCase): self.assertIsNone(cgnapt_approx_vnf._vnf_process) @mock.patch.object(process, 'check_if_process_failed') + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') def test_collect_kpi(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd) + cgnapt_approx_vnf.scenario_helper.scenario_cfg = { + 'nodes': {cgnapt_approx_vnf.name: "mock"} + } cgnapt_approx_vnf._vnf_process = mock.MagicMock( **{"is_alive.return_value": True, "exitcode": None}) cgnapt_approx_vnf.q_in = mock.MagicMock() @@ -332,7 +336,12 @@ class TestCgnaptApproxVnf(unittest.TestCase): cgnapt_approx_vnf.q_out.qsize = mock.Mock(return_value=0) cgnapt_approx_vnf.resource = mock.Mock( autospec=resource.ResourceProfile) - result = {'packets_dropped': 0, 'packets_fwd': 0, 'packets_in': 0} + result = { + 'physical_node': 'mock_node', + 'packets_dropped': 0, + 'packets_fwd': 0, + 'packets_in': 0 + } with mock.patch.object(cgnapt_approx_vnf, 'get_stats', return_value=''): self.assertEqual(result, cgnapt_approx_vnf.collect_kpi()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py index b23854e9f..678e58056 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py @@ -20,6 +20,7 @@ import mock from copy import deepcopy from yardstick.tests import STL_MOCKS +from yardstick.benchmark.contexts import base as ctx_base SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper' @@ -29,7 +30,7 @@ stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) stl_patch.start() if stl_patch: - from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf + from yardstick.network_services.vnf_generic.vnf import prox_vnf from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh @@ -187,7 +188,7 @@ class TestProxApproxVnf(unittest.TestCase): 'interfaces': { 'xe0': { 'local_iface_name': 'ens513f0', - 'vld_id': ProxApproxVnf.DOWNLINK, + 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.40.20', 'dst_mac': '00:00:00:00:00:01', @@ -221,7 +222,7 @@ class TestProxApproxVnf(unittest.TestCase): 'interfaces': { 'xe0': { 'local_iface_name': 'ens785f0', - 'vld_id': ProxApproxVnf.UPLINK, + 'vld_id': prox_vnf.ProxApproxVnf.UPLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.100.20', 'dst_mac': '00:00:00:00:00:02', @@ -252,7 +253,7 @@ class TestProxApproxVnf(unittest.TestCase): 'interfaces': { 'xe0': { 'local_iface_name': 'ens786f0', - 'vld_id': ProxApproxVnf.UPLINK, + 'vld_id': prox_vnf.ProxApproxVnf.UPLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.100.19', 'dst_mac': '00:00:00:00:00:04', @@ -264,7 +265,7 @@ class 
TestProxApproxVnf(unittest.TestCase): }, 'xe1': { 'local_iface_name': 'ens786f1', - 'vld_id': ProxApproxVnf.DOWNLINK, + 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.40.19', 'dst_mac': '00:00:00:00:00:03', @@ -316,16 +317,21 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test___init__(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) self.assertIsNone(prox_approx_vnf._vnf_process) + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch(SSH_HELPER) def test_collect_kpi_no_client(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf.scenario_helper.scenario_cfg = { + 'nodes': {prox_approx_vnf.name: "mock"} + } prox_approx_vnf.resource_helper = None expected = { + 'physical_node': 'mock_node', 'packets_in': 0, 'packets_dropped': 0, 'packets_fwd': 0, @@ -334,6 +340,7 @@ class TestProxApproxVnf(unittest.TestCase): result = prox_approx_vnf.collect_kpi() self.assertEqual(result, expected) + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch(SSH_HELPER) def test_collect_kpi(self, ssh, *args): mock_ssh(ssh) @@ -343,10 +350,14 @@ class TestProxApproxVnf(unittest.TestCase): [2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]] resource_helper.collect_collectd_kpi.return_value = {'core': {'result': 234}} - prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf.scenario_helper.scenario_cfg = { + 'nodes': {prox_approx_vnf.name: "mock"} + } prox_approx_vnf.resource_helper = resource_helper expected = { + 'physical_node': 'mock_node', 'packets_in': 4, 'packets_dropped': 4, 'packets_fwd': 8, @@ -359,13 +370,16 @@ class TestProxApproxVnf(unittest.TestCase): self.assertNotEqual(result['packets_fwd'], 0) self.assertNotEqual(result['packets_fwd'], 0) + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch(SSH_HELPER) def test_collect_kpi_error(self, ssh, *args): mock_ssh(ssh) resource_helper = mock.MagicMock() - - prox_approx_vnf = ProxApproxVnf(NAME, deepcopy(self.VNFD0)) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, deepcopy(self.VNFD0)) + prox_approx_vnf.scenario_helper.scenario_cfg = { + 'nodes': {prox_approx_vnf.name: "mock"} + } prox_approx_vnf.resource_helper = resource_helper prox_approx_vnf.vnfd_helper['vdu'][0]['external-interface'] = [] prox_approx_vnf.vnfd_helper.port_pairs.interfaces = [] @@ -385,7 +399,7 @@ class TestProxApproxVnf(unittest.TestCase): def test_run_prox(self, ssh, *_): mock_ssh(ssh) - prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG prox_approx_vnf.ssh_helper.join_bin_path.return_value = '/tool_path12/tool_file34' prox_approx_vnf.setup_helper.remote_path = 'configs/file56.cfg' @@ -399,7 +413,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def bad_test_instantiate(self, *args): - prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.scenario_helper = mock.MagicMock() prox_approx_vnf.setup_helper = mock.MagicMock() # we can't mock super @@ -409,7 +423,7 @@ class 
TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test_wait_for_instantiate_panic(self, ssh, *args): mock_ssh(ssh, exec_result=(1, "", "")) - prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf._vnf_process = mock.MagicMock(**{"is_alive.return_value": True}) prox_approx_vnf._run_prox = mock.Mock(return_value=0) prox_approx_vnf.WAIT_TIME = 0 @@ -421,7 +435,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test_terminate(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf._vnf_process = mock.MagicMock() prox_approx_vnf._vnf_process.terminate = mock.Mock() prox_approx_vnf.ssh_helper = mock.MagicMock() @@ -433,7 +447,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test__vnf_up_post(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.resource_helper = resource_helper = mock.Mock() prox_approx_vnf._vnf_up_post() @@ -442,7 +456,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test_vnf_execute_oserror(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.resource_helper = resource_helper = mock.Mock() resource_helper.execute.side_effect = OSError(errno.EPIPE, "") diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py index 5574c6770..edd0ff796 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py @@ -18,6 +18,7 @@ import mock from yardstick.tests import STL_MOCKS from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh +from yardstick.benchmark.contexts import base as ctx_base STLClient = mock.MagicMock() @@ -214,15 +215,25 @@ class TestRouterVNF(unittest.TestCase): stats = RouterVNF.get_stats(self.IP_SHOW_STATS_OUTPUT) self.assertDictEqual(stats, self.STATS) + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") @mock.patch(SSH_HELPER) - def test_collect_kpi(self, ssh, _): + def test_collect_kpi(self, ssh, *args): m = mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] router_vnf = RouterVNF(name, vnfd) + router_vnf.scenario_helper.scenario_cfg = { + 'nodes': {router_vnf.name: "mock"} + } router_vnf.ssh_helper = m - result = {'packets_dropped': 0, 'packets_fwd': 0, 'packets_in': 0, 'link_stats': {}} + result = { + 'physical_node': 'mock_node', + 'packets_dropped': 0, + 'packets_fwd': 0, + 'packets_in': 0, + 'link_stats': {} + } self.assertEqual(result, router_vnf.collect_kpi()) @mock.patch(SSH_HELPER) @@ -235,9 +246,9 @@ class TestRouterVNF(unittest.TestCase): router_vnf._run() router_vnf.ssh_helper.drop_connection.assert_called_once() - @mock.patch("yardstick.network_services.vnf_generic.vnf.router_vnf.Context") + @mock.patch.object(ctx_base, 'Context') @mock.patch(SSH_HELPER) - def test_instantiate(self, ssh, _): + def test_instantiate(self, ssh, *args): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] diff --git 
a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py index ce849d174..331e80d00 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py @@ -18,7 +18,6 @@ import unittest import mock import six -from yardstick.benchmark.contexts.base import Context from yardstick.common import exceptions as y_exceptions from yardstick.common import utils from yardstick.network_services.nfvi.resource import ResourceProfile @@ -35,6 +34,7 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper from yardstick.tests.unit.network_services.vnf_generic.vnf import test_base +from yardstick.benchmark.contexts import base as ctx_base class MockError(Exception): @@ -1533,33 +1533,20 @@ class TestSampleVnf(unittest.TestCase): self.assertIsNotNone(sample_vnf.queue_wrapper) self.assertIsNotNone(sample_vnf._vnf_process) + @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') @mock.patch("yardstick.ssh.SSH") - def test_instantiate(self, ssh): + def test_instantiate(self, ssh, *args): test_base.mock_ssh(ssh) - nodes = { 'vnf1': 'name1', 'vnf2': 'name2', } - context1 = mock.Mock() - context1._get_server.return_value = None - context2 = mock.Mock() - context2._get_server.return_value = context2 - - try: - Context.list.clear() - except AttributeError: - # clear() but works in Py2.7 - Context.list[:] = [] - - Context.list.extend([ - context1, - context2, - ]) - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] sample_vnf = SampleVNF('vnf1', vnfd) + sample_vnf.scenario_helper.scenario_cfg = { + 'nodes': {sample_vnf.name: 'mock'} + } sample_vnf.APP_NAME = 'sample1' sample_vnf._start_server = mock.Mock(return_value=0) sample_vnf._vnf_process = mock.MagicMock() @@ -1572,7 +1559,6 @@ class TestSampleVnf(unittest.TestCase): } self.assertIsNone(sample_vnf.instantiate(scenario_cfg, {})) - self.assertEqual(sample_vnf.nfvi_context, context2) def test__update_collectd_options(self): scenario_cfg = {'options': @@ -1709,9 +1695,13 @@ class TestSampleVnf(unittest.TestCase): self.assertEqual(sample_vnf.get_stats(), 'the stats') - def test_collect_kpi(self): + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + def test_collect_kpi(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] sample_vnf = SampleVNF('vnf1', vnfd) + sample_vnf.scenario_helper.scenario_cfg = { + 'nodes': {sample_vnf.name: "mock"} + } sample_vnf.APP_NAME = 'sample1' sample_vnf.COLLECT_KPI = r'\s(\d+)\D*(\d+)\D*(\d+)' sample_vnf.COLLECT_MAP = { @@ -1728,18 +1718,24 @@ class TestSampleVnf(unittest.TestCase): 'k2': 34, 'k3': 91, 'collect_stats': {}, + 'physical_node': 'mock_node' } result = sample_vnf.collect_kpi() self.assertDictEqual(result, expected) - def test_collect_kpi_default(self): + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + def test_collect_kpi_default(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] sample_vnf = SampleVNF('vnf1', vnfd) + sample_vnf.scenario_helper.scenario_cfg = { + 'nodes': {sample_vnf.name: "mock"} + } sample_vnf.APP_NAME = 'sample1' sample_vnf.COLLECT_KPI = r'\s(\d+)\D*(\d+)\D*(\d+)' 
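Most collect_kpi() tests in this patch share the same setup: patch Context.get_physical_node_from_server to return a fixed name, point scenario_cfg['nodes'] at the VNF under test, and expect a 'physical_node' key in the returned KPIs. A reduced sketch of that shape; the Context and FakeVnf classes below are stand-ins, not the Yardstick classes:

import unittest
from unittest import mock


class Context(object):
    # Stand-in for yardstick.benchmark.contexts.base.Context.
    @staticmethod
    def get_physical_node_from_server(server_name):
        raise NotImplementedError  # real lookup is not needed in the sketch


class FakeVnf(object):
    def __init__(self, name, scenario_cfg):
        self.name = name
        self.scenario_cfg = scenario_cfg

    def collect_kpi(self):
        server = self.scenario_cfg['nodes'][self.name]
        return {
            'physical_node': Context.get_physical_node_from_server(server),
            'packets_in': 0,
            'packets_fwd': 0,
            'packets_dropped': 0,
        }


class FakeVnfKpiTestCase(unittest.TestCase):
    @mock.patch.object(Context, 'get_physical_node_from_server',
                       return_value='mock_node')
    def test_collect_kpi(self, *args):
        vnf = FakeVnf('vnf__1', {'nodes': {'vnf__1': 'mock'}})
        expected = {
            'physical_node': 'mock_node',
            'packets_in': 0,
            'packets_fwd': 0,
            'packets_dropped': 0,
        }
        self.assertEqual(expected, vnf.collect_kpi())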
sample_vnf.get_stats = mock.Mock(return_value='') expected = { + 'physical_node': 'mock_node', 'packets_in': 0, 'packets_fwd': 0, 'packets_dropped': 0, diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py index 59594a3c3..66f9e93ae 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# import subprocess @@ -20,18 +19,10 @@ import six import unittest from yardstick import ssh +from yardstick.benchmark.contexts import base as ctx_base from yardstick.common import utils -from yardstick.tests import STL_MOCKS - - -STLClient = mock.MagicMock() -stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) -stl_patch.start() - -if stl_patch: - from yardstick.network_services.vnf_generic.vnf.tg_ixload import IxLoadTrafficGen - from yardstick.network_services.vnf_generic.vnf.tg_ixload import IxLoadResourceHelper - from yardstick.network_services.traffic_profile.base import TrafficProfile +from yardstick.network_services.vnf_generic.vnf import tg_ixload +from yardstick.network_services.traffic_profile.base import TrafficProfile NAME = "tg__1" @@ -118,146 +109,127 @@ class TestIxLoadTrafficGen(unittest.TestCase): def setUp(self): self._mock_call = mock.patch.object(subprocess, "call") self.mock_call = self._mock_call.start() - self._mock_open = mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open") + self._mock_open = mock.patch.object(tg_ixload, 'open') self.mock_open = self._mock_open.start() - self.addCleanup(self._stop_mock) def _stop_mock(self): self._mock_call.stop() self._mock_open.stop() - def test___init__(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) - self.assertIsNone(ixload_traffic_gen.resource_helper.data) + @mock.patch.object(ssh, 'SSH') + def test___init__(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + self.assertIsNone(ixload_traffic_gen.resource_helper.data) - def test_collect_kpi(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) - ixload_traffic_gen.data = {} - restult = ixload_traffic_gen.collect_kpi() - self.assertEqual({}, restult) + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', + return_value='mock_node') + @mock.patch.object(ssh, 'SSH') + def test_collect_kpi(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen.scenario_helper.scenario_cfg = { + 'nodes': {ixload_traffic_gen.name: "mock"} + } + ixload_traffic_gen.data = {} + result = ixload_traffic_gen.collect_kpi() - def test_listen_traffic(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = 
mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) - self.assertIsNone(ixload_traffic_gen.listen_traffic({})) + expected = { + 'physical_node': 'mock_node', + 'collect_stats': {}} + self.assertEqual(expected, result) + + @mock.patch.object(ssh, 'SSH') + def test_listen_traffic(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + self.assertIsNone(ixload_traffic_gen.listen_traffic({})) @mock.patch.object(utils, 'find_relative_file') @mock.patch.object(utils, 'makedirs') + @mock.patch.object(ctx_base.Context, 'get_context_from_server') @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil") - def test_instantiate(self, *args): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh_mock.run = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) - scenario_cfg = {'tc': "nsb_test_case", - 'ixia_profile': "ixload.cfg", - 'task_path': "/path/to/task"} - ixload_traffic_gen.RESULTS_MOUNT = "/tmp/result" - scenario_cfg.update({'options': {'packetsize': 64, 'traffic_type': 4, - 'rfc2544': {'allowed_drop_rate': '0.8 - 1'}, - 'vnf__1': {'rules': 'acl_1rule.yaml', - 'vnf_config': {'lb_config': 'SW', - 'lb_count': 1, - 'worker_config': - '1C/1T', - 'worker_threads': 1}} - }}) - with mock.patch.object(six.moves.builtins, 'open', - create=True) as mock_open: - mock_open.return_value = mock.MagicMock() - ixload_traffic_gen.instantiate(scenario_cfg, {}) + @mock.patch.object(ssh, 'SSH') + def test_instantiate(self, mock_shutil, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + scenario_cfg = {'tc': "nsb_test_case", + 'ixia_profile': "ixload.cfg", + 'task_path': "/path/to/task"} + ixload_traffic_gen.RESULTS_MOUNT = "/tmp/result" + mock_shutil.copy = mock.Mock() + scenario_cfg.update({'options': {'packetsize': 64, 'traffic_type': 4, + 'rfc2544': {'allowed_drop_rate': '0.8 - 1'}, + 'vnf__1': {'rules': 'acl_1rule.yaml', + 'vnf_config': {'lb_config': 'SW', + 'lb_count': 1, + 'worker_config': + '1C/1T', + 'worker_threads': 1}} + }}) + scenario_cfg.update({ + 'nodes': {ixload_traffic_gen.name: "mock"} + }) + with mock.patch.object(six.moves.builtins, 'open', + create=True) as mock_open: + mock_open.return_value = mock.MagicMock() + ixload_traffic_gen.instantiate(scenario_cfg, {}) @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.min") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil") + @mock.patch.object(ssh, 'SSH') def test_run_traffic(self, *args): mock_traffic_profile = mock.Mock(autospec=TrafficProfile) mock_traffic_profile.get_traffic_definition.return_value = "64" mock_traffic_profile.params = self.TRAFFIC_PROFILE - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh_mock.run = \ - 
mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vnfd["mgmt-interface"].update({"tg-config": {}}) - vnfd["mgmt-interface"]["tg-config"].update({"ixchassis": - "1.1.1.1"}) - vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path": - "/root"}) - sut = IxLoadTrafficGen(NAME, vnfd) - sut.connection = mock.Mock() - sut.connection.run = mock.Mock() - sut._traffic_runner = mock.Mock(return_value=0) - result = sut.run_traffic(mock_traffic_profile) - self.assertIsNone(result) + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vnfd["mgmt-interface"].update({"tg-config": {}}) + vnfd["mgmt-interface"]["tg-config"].update({"ixchassis": + "1.1.1.1"}) + vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path": + "/root"}) + sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + sut.connection = mock.Mock() + sut.connection.run = mock.Mock() + sut._traffic_runner = mock.Mock(return_value=0) + result = sut.run_traffic(mock_traffic_profile) + self.assertIsNone(result) @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.min") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil") + @mock.patch.object(ssh, 'SSH') def test_run_traffic_csv(self, *args): mock_traffic_profile = mock.Mock(autospec=TrafficProfile) mock_traffic_profile.get_traffic_definition.return_value = "64" mock_traffic_profile.params = self.TRAFFIC_PROFILE - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh_mock.run = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vnfd["mgmt-interface"].update({"tg-config": {}}) - vnfd["mgmt-interface"]["tg-config"].update({"ixchassis": - "1.1.1.1"}) - vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path": - "/root"}) - sut = IxLoadTrafficGen(NAME, vnfd) - sut.connection = mock.Mock() - sut.connection.run = mock.Mock() - sut._traffic_runner = mock.Mock(return_value=0) - subprocess.call(["touch", "/tmp/1.csv"]) - sut.rel_bin_path = mock.Mock(return_value="/tmp/*.csv") - result = sut.run_traffic(mock_traffic_profile) - self.assertIsNone(result) + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vnfd["mgmt-interface"].update({"tg-config": {}}) + vnfd["mgmt-interface"]["tg-config"].update({"ixchassis": + "1.1.1.1"}) + vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path": + "/root"}) + sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + sut.connection = mock.Mock() + sut.connection.run = mock.Mock() + sut._traffic_runner = mock.Mock(return_value=0) + sut.rel_bin_path = mock.Mock(return_value="/tmp/*.csv") + result = sut.run_traffic(mock_traffic_profile) + self.assertIsNone(result) - @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call") @mock.patch.object(ssh, 'SSH') def test_terminate(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) self.assertIsNone(ixload_traffic_gen.terminate()) - @mock.patch("yardstick.ssh.SSH") + @mock.patch.object(ssh, 'SSH') def test_parse_csv_read(self, mock_ssh): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] kpi_data = { @@ 
-273,15 +245,16 @@ class TestIxLoadTrafficGen(unittest.TestCase): mock_ssh_type.execute.return_value = 0, "", "" mock_ssh.from_node.return_value = mock_ssh_type - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) result = ixload_traffic_gen.resource_helper.result ixload_traffic_gen.resource_helper.parse_csv_read(http_reader) - for key_left, key_right in IxLoadResourceHelper.KPI_LIST.items(): + for key_left, key_right in ( + tg_ixload.IxLoadResourceHelper.KPI_LIST.items()): self.assertEqual(result[key_left][-1], int(kpi_data[key_right])) - @mock.patch("yardstick.ssh.SSH") - def test_parse_csv_read_value_error(self, mock_ssh): + @mock.patch.object(ssh, 'SSH') + def test_parse_csv_read_value_error(self, mock_ssh, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] http_reader = [{ 'HTTP Total Throughput (Kbps)': 1, @@ -295,14 +268,14 @@ class TestIxLoadTrafficGen(unittest.TestCase): mock_ssh_type.execute.return_value = 0, "", "" mock_ssh.from_node.return_value = mock_ssh_type - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) init_value = ixload_traffic_gen.resource_helper.result ixload_traffic_gen.resource_helper.parse_csv_read(http_reader) self.assertDictEqual(ixload_traffic_gen.resource_helper.result, init_value) @mock.patch.object(ssh, 'SSH') - def test_parse_csv_read_error(self, mock_ssh): + def test_parse_csv_read_error(self, mock_ssh, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] http_reader = [{ 'HTTP Total Throughput (Kbps)': 1, @@ -315,7 +288,7 @@ class TestIxLoadTrafficGen(unittest.TestCase): mock_ssh_type.execute.return_value = 0, "", "" mock_ssh.from_node.return_value = mock_ssh_type - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) with self.assertRaises(KeyError): ixload_traffic_gen.resource_helper.parse_csv_read(http_reader) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py index 14e0db788..d774bb9f7 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py @@ -21,6 +21,7 @@ import unittest from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh from yardstick.tests import STL_MOCKS +from yardstick.benchmark.contexts import base as ctx_base SSH_HELPER = "yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper" @@ -253,14 +254,25 @@ class TestPingTrafficGen(unittest.TestCase): self.assertNotEqual(ext_ifs[0]['virtual-interface']['local_iface_name'], 'if_name_1') self.assertNotEqual(ext_ifs[1]['virtual-interface']['local_iface_name'], 'if_name_2') + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch("yardstick.ssh.SSH") - def test_collect_kpi(self, ssh): + def test_collect_kpi(self, ssh, *args): mock_ssh(ssh, exec_result=(0, "success", "")) + ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0) + ping_traffic_gen.scenario_helper.scenario_cfg = { + 'nodes': {ping_traffic_gen.name: "mock"} + } ping_traffic_gen._queue = Queue() ping_traffic_gen._queue.put({}) - ping_traffic_gen.collect_kpi() - self.assertEqual(ping_traffic_gen._result, {}) + expected = { + 'physical_node': 'mock_node', + 'collect_stats': {} + } + # NOTE: Why we check _result but not collect_kpi() return value + # 
self.assertEqual(ping_traffic_gen._result, {}) + self.assertEqual(ping_traffic_gen.collect_kpi(), expected) + @mock.patch(SSH_HELPER) def test_instantiate(self, ssh): diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py index f581ec8d9..050aa4aa0 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py @@ -18,6 +18,7 @@ import mock from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh from yardstick.tests import STL_MOCKS +from yardstick.benchmark.contexts import base as ctx_base SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper' @@ -324,15 +325,22 @@ class TestProxTrafficGen(unittest.TestCase): self.assertIsNone(prox_traffic_gen._tg_process) self.assertIsNone(prox_traffic_gen._traffic_process) + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch(SSH_HELPER) def test_collect_kpi(self, ssh, *args): mock_ssh(ssh) - prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0) + prox_traffic_gen.scenario_helper.scenario_cfg = { + 'nodes': {prox_traffic_gen.name: "mock"} + } prox_traffic_gen._vnf_wrapper.resource_helper.resource = mock.MagicMock( **{"self.check_if_system_agent_running.return_value": [False]}) prox_traffic_gen._vnf_wrapper.vnf_execute = mock.Mock(return_value="") - self.assertEqual({}, prox_traffic_gen.collect_kpi()) + expected = { + 'collect_stats': {}, + 'physical_node': 'mock_node' + } + self.assertEqual(prox_traffic_gen.collect_kpi(), expected) @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.find_relative_file') diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py index dca8098fa..42ac40b50 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py @@ -21,6 +21,7 @@ import unittest from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api from yardstick.network_services.traffic_profile import base as tp_base from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_ixia +from yardstick.benchmark.contexts import base as ctx_base TEST_FILE_YAML = 'nsb_test_case.yaml' @@ -186,6 +187,7 @@ class TestIXIATrafficGen(unittest.TestCase): ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd) self.assertIsNone(ixnet_traffic_gen.listen_traffic({})) + @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') def test_instantiate(self, *args): with mock.patch("yardstick.ssh.SSH") as ssh: ssh_mock = mock.Mock(autospec=ssh.SSH) @@ -212,6 +214,9 @@ class TestIXIATrafficGen(unittest.TestCase): 'lb_count': 1, 'worker_config': '1C/1T', 'worker_threads': 1}}}}) + scenario_cfg.update({ + 'nodes': {ixnet_traffic_gen.name: "mock"} + }) ixnet_traffic_gen.topology = "" ixnet_traffic_gen.get_ixobj = mock.MagicMock() ixnet_traffic_gen._ixia_traffic_gen = mock.MagicMock() @@ -220,17 +225,26 @@ class TestIXIATrafficGen(unittest.TestCase): IOError, ixnet_traffic_gen.instantiate(scenario_cfg, {})) + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') def test_collect_kpi(self, *args): with mock.patch("yardstick.ssh.SSH") as ssh: ssh_mock = 
mock.Mock(autospec=ssh.SSH) ssh_mock.execute = \ mock.Mock(return_value=(0, "", "")) ssh.from_node.return_value = ssh_mock + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd) + ixnet_traffic_gen.scenario_helper.scenario_cfg = { + 'nodes': {ixnet_traffic_gen.name: "mock"} + } ixnet_traffic_gen.data = {} restult = ixnet_traffic_gen.collect_kpi() - self.assertEqual({}, restult) + + expected = {'collect_stats': {}, + 'physical_node': 'mock_node'} + + self.assertEqual(expected, restult) def test_terminate(self, *args): with mock.patch("yardstick.ssh.SSH") as ssh: diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py index 9531b90c4..4d3e4ff0b 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py @@ -18,6 +18,7 @@ import unittest from yardstick.network_services.traffic_profile import base as tp_base from yardstick.network_services.vnf_generic.vnf import sample_vnf from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_trex +from yardstick.benchmark.contexts import base as ctx_base class TestTrexRfcResouceHelper(unittest.TestCase): @@ -224,7 +225,20 @@ class TestTrexTrafficGenRFC(unittest.TestCase): trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0) self.assertIsNotNone(trex_traffic_gen.resource_helper._terminated.value) - def test_instantiate(self): + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + def test_collect_kpi(self, *args): + trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0) + trex_traffic_gen.scenario_helper.scenario_cfg = { + 'nodes': {trex_traffic_gen.name: "mock"} + } + expected = { + 'physical_node': 'mock_node', + 'collect_stats': {}, + } + self.assertEqual(trex_traffic_gen.collect_kpi(), expected) + + @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') + def test_instantiate(self, *args): mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile) mock_traffic_profile.get_traffic_definition.return_value = "64" mock_traffic_profile.params = self.TRAFFIC_PROFILE @@ -255,10 +269,11 @@ class TestTrexTrafficGenRFC(unittest.TestCase): }, } tg_rfc2544_trex.WAIT_TIME = 3 - scenario_cfg.update({"nodes": ["tg_1", "vnf_1"]}) + scenario_cfg.update({"nodes": {"tg_1": {}, "vnf1": {}}}) self.assertIsNone(trex_traffic_gen.instantiate(scenario_cfg, {})) - def test_instantiate_error(self): + @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') + def test_instantiate_error(self, *args): mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile) mock_traffic_profile.get_traffic_definition.return_value = "64" mock_traffic_profile.params = self.TRAFFIC_PROFILE @@ -268,10 +283,10 @@ class TestTrexTrafficGenRFC(unittest.TestCase): trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock() scenario_cfg = { "tc": "tc_baremetal_rfc2544_ipv4_1flow_64B", - "nodes": [ - "tg_1", - "vnf_1", - ], + "nodes": { + "tg_1": {}, + "vnf1": {} + }, "topology": 'nsb_test_case.yaml', 'options': { 'packetsize': 64, diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py index 4f8742477..350ba8448 100644 --- 
a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py @@ -21,9 +21,10 @@ from yardstick.network_services.traffic_profile import base as tp_base from yardstick.network_services.traffic_profile import rfc2544 from yardstick.network_services.vnf_generic.vnf import sample_vnf from yardstick.network_services.vnf_generic.vnf import tg_trex +from yardstick.benchmark.contexts import base as ctx_base -NAME = 'vnf_1' +NAME = 'vnf__1' class TestTrexTrafficGen(unittest.TestCase): @@ -303,19 +304,28 @@ class TestTrexTrafficGen(unittest.TestCase): self.assertIsInstance(trex_traffic_gen.resource_helper, tg_trex.TrexResourceHelper) - def test_collect_kpi(self): + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + def test_collect_kpi(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) + trex_traffic_gen.scenario_helper.scenario_cfg = { + 'nodes': {trex_traffic_gen.name: "mock"} + } trex_traffic_gen.resource_helper._queue.put({}) result = trex_traffic_gen.collect_kpi() - self.assertEqual({}, result) + expected = { + 'physical_node': 'mock_node', + 'collect_stats': {} + } + self.assertEqual(expected, result) def test_listen_traffic(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) self.assertIsNone(trex_traffic_gen.listen_traffic({})) - def test_instantiate(self): + @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') + def test_instantiate(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen._start_server = mock.Mock(return_value=0) @@ -329,7 +339,8 @@ class TestTrexTrafficGen(unittest.TestCase): self.assertIsNone(trex_traffic_gen.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG)) - def test_instantiate_error(self): + @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') + def test_instantiate_error(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen._start_server = mock.Mock(return_value=0) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py index 05a0ead71..1c4ced303 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py @@ -19,6 +19,7 @@ import os from yardstick.tests import STL_MOCKS from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh +from yardstick.benchmark.contexts import base as ctx_base SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper' @@ -330,6 +331,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase): self.assertIsNone(udp_replay_approx_vnf._vnf_process) @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch(SSH_HELPER) def test_collect_kpi(self, ssh, *args): mock_ssh(ssh) @@ -341,14 +343,21 @@ class TestUdpReplayApproxVnf(unittest.TestCase): "0\t\t7374156\t\t7374136\t\t\t0\t\t\t0\r\n" \ "1\t\t7374316\t\t7374315\t\t\t0\t\t\t0\r\n\r\nReplay>\r\r\nReplay>" udp_replay_approx_vnf = 
UdpReplayApproxVnf(NAME, vnfd) + udp_replay_approx_vnf.scenario_helper.scenario_cfg = { + 'nodes': {udp_replay_approx_vnf.name: "mock"} + } udp_replay_approx_vnf.q_in = mock.MagicMock() udp_replay_approx_vnf.q_out = mock.MagicMock() udp_replay_approx_vnf.q_out.qsize = mock.Mock(return_value=0) udp_replay_approx_vnf.all_ports = ["xe0", "xe1"] udp_replay_approx_vnf.get_stats = mock.Mock(return_value=get_stats_ret_val) - - result = {'collect_stats': {}, 'packets_dropped': 0, - 'packets_fwd': 14748451, 'packets_in': 14748472} + result = { + 'physical_node': 'mock_node', + 'collect_stats': {}, + 'packets_dropped': 0, + 'packets_fwd': 14748451, + 'packets_in': 14748472 + } self.assertEqual(result, udp_replay_approx_vnf.collect_kpi()) @mock.patch(SSH_HELPER) @@ -372,14 +381,18 @@ class TestUdpReplayApproxVnf(unittest.TestCase): file_path = os.path.join(curr_path, filename) return file_path - @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context") + @mock.patch.object(ctx_base.Context, 'get_context_from_server') @mock.patch(SSH_HELPER) - def test__build_config(self, ssh, mock_context, *args): + def test__build_config(self, ssh, mock_get_ctx, *args): mock_ssh(ssh) + nfvi_context = mock.Mock() + nfvi_context.attrs = {'nfvi_type': 'baremetal'} + mock_get_ctx.return_value = nfvi_context + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0) udp_replay_approx_vnf.queue_wrapper = mock.MagicMock() - udp_replay_approx_vnf.nfvi_context = mock_context + udp_replay_approx_vnf.nfvi_context = mock_get_ctx udp_replay_approx_vnf.nfvi_context.attrs = {'nfvi_type': 'baremetal'} udp_replay_approx_vnf.setup_helper.bound_pci = [] udp_replay_approx_vnf.ssh_helper.provision_tool = mock.MagicMock(return_value="tool_path") @@ -393,13 +406,16 @@ class TestUdpReplayApproxVnf(unittest.TestCase): self.assertEqual(cmd_line, expected) @mock.patch('yardstick.network_services.vnf_generic.vnf.udp_replay.open') - @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context") + @mock.patch.object(ctx_base.Context, 'get_context_from_server') @mock.patch(SSH_HELPER) - def test__build_pipeline_kwargs(self, ssh, mock_context, *args): + def test__build_pipeline_kwargs(self, ssh, mock_get_ctx, *args): mock_ssh(ssh) + + nfvi_context = mock.Mock() + nfvi_context.attrs = {'nfvi_type': "baremetal"} + mock_get_ctx.return_value = nfvi_context + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0) - udp_replay_approx_vnf.nfvi_context = mock_context - udp_replay_approx_vnf.nfvi_context.attrs = {'nfvi_type': 'baremetal'} udp_replay_approx_vnf.setup_helper.bound_pci = ['0000:00:0.1', '0000:00:0.3'] udp_replay_approx_vnf.all_ports = ["xe0", "xe1"] udp_replay_approx_vnf.ssh_helper.provision_tool = mock.MagicMock(return_value="tool_path") @@ -430,7 +446,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase): udp_replay_approx_vnf.ssh_helper.run.assert_called_once() - @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context") + @mock.patch.object(ctx_base.Context, 'get_context_from_server') @mock.patch(SSH_HELPER) def test_instantiate(self, ssh, *args): mock_ssh(ssh) @@ -449,7 +465,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase): self.assertEqual(udp_replay_approx_vnf.wait_for_instantiate(), 0) - @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context") + @mock.patch.object(ctx_base.Context, 'get_context_from_server') @mock.patch('yardstick.ssh.SSH') @mock.patch(SSH_HELPER) def test_instantiate_panic(self, *args): diff --git 
a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py index a0a0794ea..b67a3cdee 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py @@ -21,7 +21,7 @@ from yardstick.tests import STL_MOCKS from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh from yardstick.common import utils - +from yardstick.benchmark.contexts import base as ctx_base STLClient = mock.MagicMock() stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) @@ -259,12 +259,15 @@ pipeline> """ # noqa @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch(SSH_HELPER) def test_collect_kpi(self, ssh, *args): mock_ssh(ssh) - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] vfw_approx_vnf = FWApproxVnf(name, vnfd) + vfw_approx_vnf.scenario_helper.scenario_cfg = { + 'nodes': {vfw_approx_vnf.name: "mock"} + } vfw_approx_vnf.q_in = mock.MagicMock() vfw_approx_vnf.q_out = mock.MagicMock() vfw_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -273,6 +276,7 @@ pipeline> **{'collect_kpi.return_value': {"core": {}}}) vfw_approx_vnf.vnf_execute = mock.Mock(return_value=self.STATS) result = { + 'physical_node': 'mock_node', 'packets_dropped': 0, 'packets_fwd': 6007180, 'packets_in': 6007180, @@ -334,7 +338,7 @@ pipeline> vfw_approx_vnf.ssh_helper.run.assert_called_once() @mock.patch.object(utils, 'find_relative_file') - @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context") + @mock.patch.object(ctx_base.Context, 'get_context_from_server') @mock.patch(SSH_HELPER) def test_instantiate(self, ssh, *args): mock_ssh(ssh) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py index 73f91d1b1..c1664f2f0 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py @@ -11,34 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
index 73f91d1b1..c1664f2f0 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
@@ -11,34 +11,20 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
 from multiprocessing import Process, Queue
-import os
 import time
 
 import mock
 from six.moves import configparser
 import unittest
 
-from yardstick.tests import STL_MOCKS
-from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import FileAbsPath
-from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
 
-from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
-from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
-
-
-SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
-
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
-    from yardstick.network_services.vnf_generic.vnf.vpe_vnf import ConfigCreate
-    from yardstick.network_services.nfvi.resource import ResourceProfile
-    from yardstick.network_services.vnf_generic.vnf.vpe_vnf import \
-        VpeApproxVnf, VpeApproxSetupEnvHelper
+from yardstick.benchmark.contexts import base as ctx_base
+from yardstick.network_services.nfvi.resource import ResourceProfile
+from yardstick.network_services.vnf_generic.vnf import base as vnf_base
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf import vpe_vnf
+from yardstick.tests.unit.network_services.vnf_generic.vnf import test_base
 
 TEST_FILE_YAML = 'nsb_test_case.yaml'
@@ -47,7 +33,7 @@
 NAME = 'vnf_1'
 
 PING_OUTPUT_1 = "Pkts in: 101\r\n\tPkts dropped by AH: 100\r\n\tPkts dropped by other: 100"
 
-MODULE_PATH = FileAbsPath(__file__)
+MODULE_PATH = test_base.FileAbsPath(__file__)
 get_file_abspath = MODULE_PATH.get_path
 
@@ -155,20 +141,20 @@ class TestConfigCreate(unittest.TestCase):
     }
 
     def test___init__(self):
-        vnfd_helper = VnfdHelper(self.VNFD_0)
-        config_create = ConfigCreate(vnfd_helper, 2)
+        vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
+        config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2)
         self.assertEqual(config_create.uplink_ports, ['xe0'])
         self.assertEqual(config_create.downlink_ports, ['xe1'])
         self.assertEqual(config_create.socket, 2)
 
     def test_dpdk_port_to_link_id(self):
-        vnfd_helper = VnfdHelper(self.VNFD_0)
-        config_create = ConfigCreate(vnfd_helper, 2)
+        vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
+        config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2)
         self.assertEqual(config_create.dpdk_port_to_link_id_map, {'xe0': 0, 'xe1': 1})
 
     def test_vpe_initialize(self):
-        vnfd_helper = VnfdHelper(self.VNFD_0)
-        config_create = ConfigCreate(vnfd_helper, 2)
+        vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
+        config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2)
         config = configparser.ConfigParser()
         config_create.vpe_initialize(config)
         self.assertEqual(config.get('EAL', 'log_level'), '0')
@@ -178,16 +164,16 @@ class TestConfigCreate(unittest.TestCase):
         self.assertEqual(config.get('MEMPOOL1', 'pool_size'), '2M')
 
     def test_vpe_rxq(self):
-        vnfd_helper = VnfdHelper(self.VNFD_0)
-        config_create = ConfigCreate(vnfd_helper, 2)
+        vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
+        config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2)
         config = configparser.ConfigParser()
         config_create.downlink_ports = ['xe0']
         config_create.vpe_rxq(config)
         self.assertEqual(config.get('RXQ0.0', 'mempool'), 'MEMPOOL1')
 
     def test_get_sink_swq(self):
-        vnfd_helper = VnfdHelper(self.VNFD_0)
-        config_create = ConfigCreate(vnfd_helper, 2)
+        vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
+        config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2)
         config = configparser.ConfigParser()
         config.add_section('PIPELINE0')
         config.set('PIPELINE0', 'key1', 'value1')
@@ -204,8 +190,8 @@ class TestConfigCreate(unittest.TestCase):
         self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key5', 5), 'SWQ0 SINK1')
 
     def test_generate_vpe_script(self):
-        vnfd_helper = VnfdHelper(self.VNFD_0)
-        vpe_config_vnf = ConfigCreate(vnfd_helper, 2)
+        vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
+        vpe_config_vnf = vpe_vnf.ConfigCreate(vnfd_helper, 2)
         intf = [
             {
                 "name": 'xe1',
@@ -229,15 +215,34 @@ class TestConfigCreate(unittest.TestCase):
         self.assertNotEqual(result, '')
 
     def test_create_vpe_config(self):
-        vnfd_helper = VnfdHelper(self.VNFD_0)
-        config_create = ConfigCreate(vnfd_helper, 23)
-        config_create.downlink_ports = ['xe1']
+        vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
+        config_create = vpe_vnf.ConfigCreate(vnfd_helper, 23)
         config_create.uplink_ports = ['xe1']
-        curr_path = os.path.dirname(os.path.abspath(__file__))
-        vpe_cfg = "samples/vnf_samples/nsut/vpe/vpe_config"
-        vnf_cfg = os.path.join(curr_path, "../../../../..", vpe_cfg)
-        config_create.create_vpe_config(vnf_cfg)
-        os.system("git checkout -- %s" % vnf_cfg)
+        with mock.patch.object(config_create, 'vpe_upstream') as mock_up, \
+                mock.patch.object(config_create, 'vpe_downstream') as \
+                mock_down, \
+                mock.patch.object(config_create, 'vpe_tmq') as mock_tmq, \
+                mock.patch.object(config_create, 'vpe_initialize') as \
+                mock_ini, \
+                mock.patch.object(config_create, 'vpe_rxq') as mock_rxq:
+            mock_ini_obj = mock.Mock()
+            mock_rxq_obj = mock.Mock()
+            mock_up_obj = mock.Mock()
+            mock_down_obj = mock.Mock()
+            mock_tmq_obj = mock.Mock()
+            mock_ini.return_value = mock_ini_obj
+            mock_rxq.return_value = mock_rxq_obj
+            mock_up.return_value = mock_up_obj
+            mock_down.return_value = mock_down_obj
+            mock_tmq.return_value = mock_tmq_obj
+            config_create.create_vpe_config('fake_config_file')
+
+        mock_rxq.assert_called_once_with(mock_ini_obj)
+        mock_up.assert_called_once_with('fake_config_file', 0)
+        mock_down.assert_called_once_with('fake_config_file', 0)
+        mock_tmq.assert_called_once_with(mock_down_obj, 0)
+        mock_up_obj.write.assert_called_once()
+        mock_tmq_obj.write.assert_called_once()
 
 
 class TestVpeApproxVnf(unittest.TestCase):
@@ -409,7 +414,7 @@ class TestVpeApproxVnf(unittest.TestCase):
                     'interfaces': {
                         'xe0': {
                             'local_iface_name': 'ens513f0',
-                            'vld_id': VpeApproxVnf.DOWNLINK,
+                            'vld_id': vpe_vnf.VpeApproxVnf.DOWNLINK,
                             'netmask': '255.255.255.0',
                             'local_ip': '152.16.40.20',
                             'dst_mac': '00:00:00:00:00:01',
@@ -443,7 +448,7 @@ class TestVpeApproxVnf(unittest.TestCase):
                     'interfaces': {
                         'xe0': {
                             'local_iface_name': 'ens785f0',
-                            'vld_id': VpeApproxVnf.UPLINK,
+                            'vld_id': vpe_vnf.VpeApproxVnf.UPLINK,
                             'netmask': '255.255.255.0',
                             'local_ip': '152.16.100.20',
                             'dst_mac': '00:00:00:00:00:02',
@@ -474,7 +479,7 @@ class TestVpeApproxVnf(unittest.TestCase):
                     'interfaces': {
                         'xe0': {
                             'local_iface_name': 'ens786f0',
-                            'vld_id': VpeApproxVnf.UPLINK,
+                            'vld_id': vpe_vnf.VpeApproxVnf.UPLINK,
                             'netmask': '255.255.255.0',
                             'local_ip': '152.16.100.19',
                             'dst_mac': '00:00:00:00:00:04',
@@ -486,7 +491,7 @@ class TestVpeApproxVnf(unittest.TestCase):
                         },
                         'xe1': {
                             'local_iface_name': 'ens786f1',
-                            'vld_id': VpeApproxVnf.DOWNLINK,
+                            'vld_id': vpe_vnf.VpeApproxVnf.DOWNLINK,
                             'netmask': '255.255.255.0',
                             'local_ip': '152.16.40.19',
                             'dst_mac': '00:00:00:00:00:03',
@@ -544,25 +549,31 @@ class TestVpeApproxVnf(unittest.TestCase):
         self._mock_time_sleep.stop()
 
     def test___init__(self):
-        vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+        vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
         self.assertIsNone(vpe_approx_vnf._vnf_process)
 
-    @mock.patch(SSH_HELPER)
-    def test_collect_kpi_sa_not_running(self, ssh):
-        mock_ssh(ssh)
+    @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server',
+                       return_value='mock_node')
+    @mock.patch.object(sample_vnf, 'VnfSshHelper')
+    def test_collect_kpi_sa_not_running(self, ssh, *args):
+        test_base.mock_ssh(ssh)
         resource = mock.Mock(autospec=ResourceProfile)
         resource.check_if_system_agent_running.return_value = 1, ''
         resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234}
         resource.check_if_system_agent_running.return_value = (1, None)
 
-        vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+        vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+        vpe_approx_vnf.scenario_helper.scenario_cfg = {
+            'nodes': {vpe_approx_vnf.name: "mock"}
+        }
         vpe_approx_vnf.q_in = mock.MagicMock()
         vpe_approx_vnf.q_out = mock.MagicMock()
         vpe_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
         vpe_approx_vnf.resource_helper.resource = resource
 
         expected = {
+            'physical_node': 'mock_node',
             'pkt_in_down_stream': 0,
             'pkt_in_up_stream': 0,
             'pkt_drop_down_stream': 0,
@@ -571,21 +582,27 @@
         }
         self.assertEqual(vpe_approx_vnf.collect_kpi(), expected)
 
-    @mock.patch(SSH_HELPER)
-    def test_collect_kpi_sa_running(self, ssh):
-        mock_ssh(ssh)
+    @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server',
+                       return_value='mock_node')
+    @mock.patch.object(sample_vnf, 'VnfSshHelper')
+    def test_collect_kpi_sa_running(self, ssh, *args):
+        test_base.mock_ssh(ssh)
         resource = mock.Mock(autospec=ResourceProfile)
         resource.check_if_system_agent_running.return_value = 0, '1234'
         resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234}
 
-        vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+        vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+        vpe_approx_vnf.scenario_helper.scenario_cfg = {
+            'nodes': {vpe_approx_vnf.name: "mock"}
+        }
         vpe_approx_vnf.q_in = mock.MagicMock()
         vpe_approx_vnf.q_out = mock.MagicMock()
         vpe_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
         vpe_approx_vnf.resource_helper.resource = resource
 
         expected = {
+            'physical_node': 'mock_node',
            'pkt_in_down_stream': 0,
             'pkt_in_up_stream': 0,
             'pkt_drop_down_stream': 0,
@@ -594,20 +611,20 @@
         }
         self.assertEqual(vpe_approx_vnf.collect_kpi(), expected)
 
-    @mock.patch(SSH_HELPER)
+    @mock.patch.object(sample_vnf, 'VnfSshHelper')
     def test_vnf_execute(self, ssh):
-        mock_ssh(ssh)
-        vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+        test_base.mock_ssh(ssh)
+        vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
         vpe_approx_vnf.q_in = mock.MagicMock()
         vpe_approx_vnf.q_out = mock.MagicMock()
         vpe_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
         self.assertEqual(vpe_approx_vnf.vnf_execute("quit", 0), '')
 
-    @mock.patch(SSH_HELPER)
+    @mock.patch.object(sample_vnf, 'VnfSshHelper')
     def test_run_vpe(self, ssh):
-        mock_ssh(ssh)
+        test_base.mock_ssh(ssh)
 
-        vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+        vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
         vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML)
         vpe_approx_vnf.vnf_cfg = {
             'lb_config': 'SW',
@@ -634,14 +651,13 @@ class TestVpeApproxVnf(unittest.TestCase):
         self.assertIsNone(vpe_approx_vnf._run())
 
     @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig")
-    @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
     @mock.patch("yardstick.network_services.vnf_generic.vnf.vpe_vnf.ConfigCreate")
@mock.patch("six.moves.builtins.open") - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_build_config(self, ssh, *args): - mock_ssh(ssh) - vpe_approx_vnf = VpeApproxSetupEnvHelper(mock.MagicMock(), - mock.MagicMock(), mock.MagicMock()) + test_base.mock_ssh(ssh) + vpe_approx_vnf = vpe_vnf.VpeApproxSetupEnvHelper( + mock.MagicMock(), mock.MagicMock(), mock.MagicMock()) vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML) vpe_approx_vnf.generate_port_pairs = mock.Mock() vpe_approx_vnf.vnf_cfg = { @@ -677,9 +693,9 @@ class TestVpeApproxVnf(unittest.TestCase): expected = 'sudo tool_path -p 0x3 -f /tmp/vpe_config -s /tmp/vpe_script --hwlb 3' self.assertEqual(vpe_approx_vnf.build_config(), expected) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_wait_for_instantiate(self, ssh): - mock_ssh(ssh) + test_base.mock_ssh(ssh) mock_process = mock.Mock(autospec=Process) mock_process.is_alive.return_value = True @@ -691,18 +707,19 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.q_out = mock_q_out - vpe_approx_vnf.queue_wrapper = mock.Mock(autospec=QueueFileWrapper) + vpe_approx_vnf.queue_wrapper = mock.Mock( + autospec=vnf_base.QueueFileWrapper) vpe_approx_vnf.resource_helper.resource = mock_resource vpe_approx_vnf.q_out.put("pipeline>") self.assertEqual(vpe_approx_vnf.wait_for_instantiate(), 432) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_wait_for_instantiate_fragmented(self, ssh): - mock_ssh(ssh) + test_base.mock_ssh(ssh) mock_process = mock.Mock(autospec=Process) mock_process.is_alive.return_value = True @@ -715,17 +732,18 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.q_out = mock_q_out - vpe_approx_vnf.queue_wrapper = mock.Mock(autospec=QueueFileWrapper) + vpe_approx_vnf.queue_wrapper = mock.Mock( + autospec=vnf_base.QueueFileWrapper) vpe_approx_vnf.resource_helper.resource = mock_resource self.assertEqual(vpe_approx_vnf.wait_for_instantiate(), 432) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_wait_for_instantiate_crash(self, ssh): - mock_ssh(ssh, exec_result=(1, "", "")) + test_base.mock_ssh(ssh, exec_result=(1, "", "")) mock_process = mock.Mock(autospec=Process) mock_process.is_alive.return_value = False @@ -733,7 +751,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.resource_helper.resource = mock_resource @@ -742,9 +760,9 @@ class TestVpeApproxVnf(unittest.TestCase): self.assertIn('VNF process died', str(raised.exception)) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_wait_for_instantiate_panic(self, ssh): - mock_ssh(ssh, exec_result=(1, "", "")) + test_base.mock_ssh(ssh, exec_result=(1, "", "")) mock_process = mock.Mock(autospec=Process) mock_process.is_alive.return_value = True @@ -752,7 +770,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = VpeApproxVnf(NAME, 
+        vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
         vpe_approx_vnf._vnf_process = mock_process
         vpe_approx_vnf.resource_helper.resource = mock_resource
 
@@ -762,9 +780,9 @@ class TestVpeApproxVnf(unittest.TestCase):
 
         self.assertIn('Error starting', str(raised.exception))
 
-    @mock.patch(SSH_HELPER)
+    @mock.patch.object(sample_vnf, 'VnfSshHelper')
     def test_wait_for_instantiate_panic_fragmented(self, ssh):
-        mock_ssh(ssh, exec_result=(1, "", ""))
+        test_base.mock_ssh(ssh, exec_result=(1, "", ""))
 
         mock_process = mock.Mock(autospec=Process)
         mock_process.is_alive.return_value = True
@@ -777,7 +795,7 @@ class TestVpeApproxVnf(unittest.TestCase):
 
         mock_resource = mock.MagicMock()
 
-        vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+        vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
         vpe_approx_vnf._vnf_process = mock_process
         vpe_approx_vnf.q_out = mock_q_out
         vpe_approx_vnf.resource_helper.resource = mock_resource
@@ -787,11 +805,11 @@ class TestVpeApproxVnf(unittest.TestCase):
 
         self.assertIn('Error starting', str(raised.exception))
 
-    @mock.patch(SSH_HELPER)
+    @mock.patch.object(sample_vnf, 'VnfSshHelper')
     def test_terminate(self, ssh):
-        mock_ssh(ssh)
+        test_base.mock_ssh(ssh)
 
-        vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+        vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
         vpe_approx_vnf._vnf_process = mock.MagicMock()
         vpe_approx_vnf._resource_collect_stop = mock.Mock()
         vpe_approx_vnf.resource_helper = mock.MagicMock()
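The rewritten test_create_vpe_config shown earlier in this file replaces real file I/O (and the git checkout cleanup) with stacked mock.patch.object context managers plus assertions on the recorded call flow. The stand-alone sketch below shows that style with a hypothetical ConfigBuilder in place of ConfigCreate; the method names are invented for illustration and are not the Yardstick API.

from unittest import mock


class ConfigBuilder(object):
    # Hypothetical stand-in for a config generator with collaborating steps.
    def _read_template(self, path):
        with open(path) as handle:  # real I/O that the test avoids touching
            return handle.read()

    def _render(self, template):
        return template.upper()

    def build(self, path):
        return self._render(self._read_template(path))


builder = ConfigBuilder()
with mock.patch.object(builder, '_read_template',
                       return_value='fake template') as mock_read, \
        mock.patch.object(builder, '_render') as mock_render:
    builder.build('fake_config_file')

# The mocks outlive the with-block, so the call flow can still be verified,
# mirroring how the rewritten test checks mock_up, mock_down and mock_tmq.
mock_read.assert_called_once_with('fake_config_file')
mock_render.assert_called_once_with('fake template')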