 27 files changed, 625 insertions(+), 589 deletions(-)
diff --git a/ansible/roles/download_dpdk/defaults/main.yml b/ansible/roles/download_dpdk/defaults/main.yml
index d548280f5..885eebf03 100644
--- a/ansible/roles/download_dpdk/defaults/main.yml
+++ b/ansible/roles/download_dpdk/defaults/main.yml
@@ -1,14 +1,18 @@
 ---
-dpdk_version: "17.02"
-dpdk_url: "http://dpdk.org/browse/dpdk/snapshot/dpdk-{{ dpdk_version }}.tar.gz"
+dpdk_version: "17.02.1"
+dpdk_url: "http://fast.dpdk.org/rel/dpdk-{{ dpdk_version }}.tar.xz"
 dpdk_file: "{{ dpdk_url|basename }}"
-dpdk_unarchive: "{{ dpdk_file|regex_replace('[.]tar[.]gz$', '') }}"
+dpdk_unarchive: "{{ dpdk_file|regex_replace('[.]tar[.]xz$', '') }}"
 dpdk_dest: "{{ clone_dest }}/"
-#Note DPDK 17.08 17.11 and 18.02 are currently unsupported due to prox build issues
-dpdk_sha256s:
-  "16.07": "sha256:d876e4b2a7101f28e7e345d3c88e66afe877d15f0159c19c5bc5bc26b7b7d788"
-  "17.02": "sha256:b07b546e910095174bdb6152bb0d7ce057cc4b79aaa74771aeee4e8a7219fb38"
-  "17.05": "sha256:763bfb7e1765efcc949e79d645dc9f1ebd16591431ba0db5ce22becd928dcd0a"
-  "17.08": "sha256:3a08addbff45c636538514e9a5838fb91ea557661a4c071e03a9a6987d46e5b6" #unsupported
-  "17.11": "sha256:77a727bb3834549985f291409c9a77a1e8be1c9329ce4c3eb19a22d1461022e4" #unsupported
-  "18.02": "sha256:f1210310fd5f01a3babe3a09d9b3e5a9db791c2ec6ecfbf94ade9f893a0632b8" #unsupported
+
+#NOTE(ralonsoh): DPDK > 17.02 are currently unsupported due to prox build issues
+dpdk_md5:
+  "16.07.2": "md5:4922ea2ec935b64ff5c191fec53344a6"
+  "16.11.7": "md5:c081d113dfd57633e3bc3ebc802691be"
+  "17.02.1": "md5:cbdf8b7a92ce934d47c38cbc9c20c54a"
+  "17.05": "md5:0a68c31cd6a6cabeed0a4331073e4c05" #Ubuntu 17.10 support
+  "17.05.2": "md5:37afc9ce410d8e6945a1beb173074003" #unsupported
+  "17.08.2": "md5:dd239a878c8c40cf482fdfe438f8d99c" #unsupported
+  "17.11.3": "md5:68ca84ac878011acf44e75d33b46f55b" #unsupported
+  "18.02.2": "md5:75ad6d39b513649744e49c9fcbbb9ca5" #unsupported
+  "18.05": "md5:9fc86367cd9407ff6a8dfea56c4eddc4" #unsupported
diff --git a/ansible/roles/download_dpdk/tasks/main.yml b/ansible/roles/download_dpdk/tasks/main.yml
index bcb5dde1a..bea3febed 100644
--- a/ansible/roles/download_dpdk/tasks/main.yml
+++ b/ansible/roles/download_dpdk/tasks/main.yml
@@ -25,7 +25,7 @@
     url: "{{ dpdk_url }}"
     dest: "{{ dpdk_dest }}"
     validate_certs: False
-    checksum: "{{ dpdk_sha256s[dpdk_version] }}"
+    checksum: "{{ dpdk_md5[dpdk_version] }}"
 
 - unarchive:
     src: "{{ dpdk_dest }}/{{ dpdk_file }}"
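The role now pins the DPDK tarball with the ``checksum`` argument of Ansible's ``get_url``, which takes a ``<algorithm>:<hexdigest>`` string (here MD5). As a rough illustration only, not part of the patch, the check ``get_url`` performs is equivalent to this Python sketch::

  import hashlib

  def verify_checksum(path, spec):
      # spec uses Ansible's '<algorithm>:<hexdigest>' format, e.g.
      # 'md5:cbdf8b7a92ce934d47c38cbc9c20c54a'
      algorithm, _, expected = spec.partition(':')
      digest = hashlib.new(algorithm)
      with open(path, 'rb') as tarball:
          for chunk in iter(lambda: tarball.read(65536), b''):
              digest.update(chunk)
      return digest.hexdigest() == expected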
diff --git a/ansible/roles/download_samplevnfs/defaults/main.yml b/ansible/roles/download_samplevnfs/defaults/main.yml
index e40eb67c0..c5e880e57 100644
--- a/ansible/roles/download_samplevnfs/defaults/main.yml
+++ b/ansible/roles/download_samplevnfs/defaults/main.yml
@@ -1,4 +1,4 @@
 ---
 samplevnf_url: "https://git.opnfv.org/samplevnf"
 samplevnf_dest: "{{ clone_dest }}/samplevnf"
-samplevnf_version: "stable/euphrates"
+samplevnf_version: "stable/fraser"
diff --git a/ansible/roles/install_yardstick/tasks/main.yml b/ansible/roles/install_yardstick/tasks/main.yml
index ee1b83756..973b2b027 100644
--- a/ansible/roles/install_yardstick/tasks/main.yml
+++ b/ansible/roles/install_yardstick/tasks/main.yml
@@ -37,10 +37,39 @@
 #     name: pip
 #     state: latest
 
-- name: install yardstick without virtual environment
-  include_tasks: regular_install.yml
+- name: Install Yardstick requirements (venv)
+  pip:
+    requirements: "{{ yardstick_dir }}/requirements.txt"
+    virtualenv: "{{ yardstick_dir }}/virtualenv"
+  async: 300
+  poll: 0
+  register: pip_installer
+  when: virtual_environment == True
+
+- name: Install Yardstick requirements
+  pip:
+    requirements: "{{ yardstick_dir }}/requirements.txt"
+  async: 300
+  poll: 0
+  register: pip_installer
   when: virtual_environment == False
 
-- name: install yardstick with virtual environment
-  include_tasks: virtual_install.yml
+- name: Check install Yardstick requirements
+  async_status:
+    jid: "{{ pip_installer.ansible_job_id }}"
+  register: job_result
+  until: job_result.finished
+  retries: 100
+
+- name: Install Yardstick code (venv)
+  pip:
+    name: "{{ yardstick_dir }}/"
+    editable: True
+    virtualenv: "{{ yardstick_dir }}/virtualenv"
   when: virtual_environment == True
+
+- name: Install Yardstick code
+  pip:
+    name: "{{ yardstick_dir }}/"
+    editable: True
+  when: virtual_environment == False
diff --git a/ansible/roles/install_yardstick/tasks/regular_install.yml b/ansible/roles/install_yardstick/tasks/regular_install.yml
deleted file mode 100644
index cd0e86fb9..000000000
--- a/ansible/roles/install_yardstick/tasks/regular_install.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) 2018 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: Install Yardstick requirements
-  pip:
-    requirements: "{{ yardstick_dir }}/requirements.txt"
-
-- name: Install Yardstick code
-  pip:
-    name: "."
-    extra_args: -e
-    chdir: "{{ yardstick_dir }}/"
diff --git a/ansible/roles/install_yardstick/tasks/virtual_install.yml b/ansible/roles/install_yardstick/tasks/virtual_install.yml
deleted file mode 100644
index 8545acbcb..000000000
--- a/ansible/roles/install_yardstick/tasks/virtual_install.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) 2018 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: Install Yardstick requirements
-  pip:
-    requirements: "{{ yardstick_dir }}/requirements.txt"
-    virtualenv: "{{ yardstick_dir }}/virtualenv"
-
-- name: Install Yardstick code
-  pip:
-    name: "{{ yardstick_dir }}/."
-    extra_args: -e
-    virtualenv: "{{ yardstick_dir }}/virtualenv"
-
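The rewritten role starts both ``pip`` tasks in fire-and-forget mode (``async: 300``, ``poll: 0``) and then waits on the registered job with ``async_status`` (``until: job_result.finished``, ``retries: 100``). Purely as an analogy, not part of the patch, the same start-then-poll pattern looks like this in Python::

  import subprocess
  import time

  # Launch the long-running install without blocking (async + poll: 0)...
  job = subprocess.Popen(['pip', 'install', '-r', 'requirements.txt'])

  # ...then poll it until it finishes (async_status with until/retries).
  for _ in range(100):            # retries: 100
      if job.poll() is not None:  # job_result.finished
          break
      time.sleep(3)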
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 4aa7237a5..097bc3c3f 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -26,7 +26,7 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \
 
 RUN apt-get update && apt-get install -y git python python-setuptools python-pip iputils-ping && apt-get -y autoremove && apt-get clean
 
 RUN easy_install -U setuptools==30.0.0
-RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2
+RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.12.0 python-heatclient==1.11.0 ansible==2.5.5
 
 RUN mkdir -p ${REPOS_DIR}
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
index 21095cbe3..ef41cba03 100644
--- a/docker/Dockerfile.aarch64.patch
+++ b/docker/Dockerfile.aarch64.patch
@@ -31,7 +31,7 @@ index 62ea0d0..f2f41771 100644
 +RUN apt-get update && apt-get install -y git python python-setuptools python-pip iputils-ping && apt-get -y autoremove && \
 +    apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean
  RUN easy_install -U setuptools==30.0.0
- RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2
+ RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.12.0 python-heatclient==1.11.0 ansible==2.5.5
 @@ -48,8 +49,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
  # nginx=5000, rabbitmq=5672
diff --git a/docs/testing/developer/devguide/devguide.rst b/docs/testing/developer/devguide/devguide.rst
index 04d5350be..dbe92b846 100755
--- a/docs/testing/developer/devguide/devguide.rst
+++ b/docs/testing/developer/devguide/devguide.rst
@@ -1,16 +1,42 @@
+..
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+   Convention for heading levels in Yardstick documentation:
+
+   =======  Heading 0 (reserved for the title in a document)
+   -------  Heading 1
+   ~~~~~~~  Heading 2
+   +++++++  Heading 3
+   '''''''  Heading 4
+
+   Avoid deeper levels because they do not render well.
+
 Introduction
-=============
+------------
 
-Yardstick is a project dealing with performance testing. Yardstick produces its own test cases but can also be considered as a framework to support feature project testing.
+Yardstick is a project dealing with performance testing. Yardstick produces
+its own test cases but can also be considered as a framework to support feature
+project testing.
 
-Yardstick developed a test API that can be used by any OPNFV project. Therefore there are many ways to contribute to Yardstick.
+Yardstick developed a test API that can be used by any OPNFV project. Therefore
+there are many ways to contribute to Yardstick.
 
 You can:
 
 * Develop new test cases
 * Review codes
 * Develop Yardstick API / framework
-* Develop Yardstick grafana dashboards and  Yardstick reporting page
+* Develop Yardstick grafana dashboards and Yardstick reporting page
 * Write Yardstick documentation
 
 This developer guide describes how to interact with the Yardstick project.
@@ -19,28 +45,30 @@ part is a list of “How to” to help you to join the Yardstick family whatever
 your field of interest is.
 
 Where can I find some help to start?
--------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. _`user guide`: http://artifacts.opnfv.org/yardstick/danube/1.0/docs/stesting_user_userguide/index.html
 .. _`wiki page`: https://wiki.opnfv.org/display/yardstick/
 
 This guide is made for you. You can have a look at the `user guide`_. There
 are also references on documentation, video tutorials, tips in the
-project `wiki page`_. You can also directly contact us by mail with [Yardstick] prefix in the title at opnfv-tech-discuss@lists.opnfv.org or on the IRC chan #opnfv-yardstick.
+project `wiki page`_. You can also directly contact us by mail with [Yardstick]
+prefix in the subject at opnfv-tech-discuss@lists.opnfv.org or on the IRC chan
+#opnfv-yardstick.
 
 
 Yardstick developer areas
-==========================
+-------------------------
 
 Yardstick framework
---------------------
+~~~~~~~~~~~~~~~~~~~
 
-Yardstick can be considered as a framework. Yardstick is release as a docker
+Yardstick can be considered as a framework. Yardstick is released as a docker
 file, including tools, scripts and a CLI to prepare the environement and run
-tests. It simplifies the integration of external test suites in CI pipeline
-and provide commodity tools to collect and display results.
+tests. It simplifies the integration of external test suites in CI pipelines
+and provides commodity tools to collect and display results.
 
-Since Danube, test categories also known as tiers have been created to group
+Since Danube, test categories (also known as tiers) have been created to group
 similar tests, provide consistant sub-lists and at the end optimize test
 duration for CI (see How To section).
 
@@ -56,44 +84,54 @@ The tiers are:
 
 How Todos?
-===========
+----------
 
 How Yardstick works?
----------------------
+~~~~~~~~~~~~~~~~~~~~
 
 The installation and configuration of the Yardstick is described in the
 `user guide`_.
 
 How to work with test cases?
-----------------------------
-
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-**Sample Test cases**
+Sample Test cases
++++++++++++++++++
 
-Yardstick provides many sample test cases which are located at "samples" directory of repo.
+Yardstick provides many sample test cases which are located at ``samples`` directory of repo.
 
-Sample test cases are designed as following goals:
+Sample test cases are designed with the following goals:
 
-1. Helping user better understand yardstick features(including new feature and new test capacity).
+1. Helping user better understand Yardstick features (including new feature and
+   new test capacity).
 
-2. Helping developer to debug his new feature and test case before it is offical released.
+2. Helping developer to debug a new feature and test case before it is
+   offically released.
 
-3. Helping other developers understand and verify the new patch before the patch merged.
+3. Helping other developers understand and verify the new patch before the
+   patch is merged.
 
-So developers should upload your sample test case as well when they are trying to upload a new patch which is about the yardstick new test case or new feature.
+Developers should upload their sample test cases as well when they are
+uploading a new patch which is about the Yardstick new test case or new feature.
 
-**OPNFV Release Test cases**
+OPNFV Release Test cases
+++++++++++++++++++++++++
 
-OPNFV Release test cases which are located at "tests/opnfv/test_cases" of repo.
-those test cases are runing by OPNFV CI jobs, It means those test cases should be more mature than sample test cases.
-OPNFV scenario owners can select related test cases and add them into the test suites which is represent the scenario.
+OPNFV Release test cases are located at ``yardstick/tests/opnfv/test_cases``.
+These test cases are run by OPNFV CI jobs, which means these test cases should
+be more mature than sample test cases.
+OPNFV scenario owners can select related test cases and add them into the test
+suites which represent their scenario.
 
-**Test case Description File**
+Test case Description File
+++++++++++++++++++++++++++
 
 This section will introduce the meaning of the Test case description file.
-we will use ping.yaml as a example to show you how to understand the test case description file.
-In this Yaml file, you can easily find it consists of two sections. One is “Scenarios”, the other is “Context”.::
+we will use ping.yaml as a example to show you how to understand the test case
+description file.
+This ``yaml`` file consists of two sections. One is ``scenarios``, the other
+is ``context``.::
 
   ---
   # Sample benchmark task config file
@@ -150,18 +188,32 @@ In this Yaml file, you can easily find it consists of two sections. One is “Sc
   {% endif %}
 
-"Contexts" section is the description of pre-condition of testing. As ping.yaml shown, you can configure the image, flavor , name ,affinity and network of Test VM(servers), with this section, you will get a pre-condition env for Testing.
-Yardstick will automatic setup the stack which are described in this section.
-In fact, yardstick use convert this section to heat template and setup the VMs by heat-client (Meanwhile, yardstick can support to convert this section to Kubernetes template to setup containers).
-
-Two Test VMs(athena and ares) are configured by keyword "servers".
-"flavor" will determine how many vCPU, how much memory for test VMs.
-As "yardstick-flavor" is a basic flavor which will be automatically created when you run command "yardstick env prepare". "yardstick-flavor" is "1 vCPU 1G RAM,3G Disk".
-"image" is the image name of test VMs. if you use cirros.3.5.0, you need fill the username of this image into "user". the "policy" of placement of Test VMs have two values (affinity and availability).
-"availability" means anti-affinity. In "network" section, you can configure which provide network and physical_network you want Test VMs use.
-you may need to configure segmentation_id when your network is vlan.
-
-Moreover, you can configure your specific flavor as below, yardstick will setup the stack for you. ::
+The ``contexts`` section is the description of pre-condition of testing. As
+``ping.yaml`` shows, you can configure the image, flavor, name, affinity and
+network of Test VM (servers), with this section, you will get a pre-condition
+env for Testing.
+Yardstick will automatically setup the stack which are described in this
+section.
+Yardstick converts this section to heat template and sets up the VMs with
+heat-client (Yardstick can also support to convert this section to Kubernetes
+template to setup containers).
+
+In the examples above, two Test VMs (athena and ares) are configured by
+keyword ``servers``.
+``flavor`` will determine how many vCPU, how much memory for test VMs.
+As ``yardstick-flavor`` is a basic flavor which will be automatically created
+when you run command ``yardstick env prepare``. ``yardstick-flavor`` is
+``1 vCPU 1G RAM,3G Disk``.
+``image`` is the image name of test VMs. If you use ``cirros.3.5.0``, you need
+fill the username of this image into ``user``.
+The ``policy`` of placement of Test VMs have two values (``affinity`` and
+``availability``). ``availability`` means anti-affinity.
+In the ``network`` section, you can configure which ``provider`` network and
+``physical_network`` you want Test VMs to use.
+You may need to configure ``segmentation_id`` when your network is vlan.
+
+Moreover, you can configure your specific flavor as below, Yardstick will setup
+the stack for you. ::
 
   flavor:
     name: yardstick-new-flavor
@@ -170,7 +222,8 @@ Moreover, you can configure your specific flavor as below, yardstick will setup
     disk: 2
 
-Besides default heat stack, yardstick also allow you to setup other two types stack. they are "Node" and "Kubernetes". ::
+Besides default ``Heat`` context, Yardstick also allows you to setup two other
+types of context. They are ``Node`` and ``Kubernetes``. ::
 
   context:
     type: Kubernetes
@@ -183,48 +236,64 @@ and ::
     name: LF
 
+The ``scenarios`` section is the description of testing steps, you can
+orchestrate the complex testing step through scenarios.
 
-"Scenarios" section is the description of testing step, you can orchestrate the complex testing step through orchestrate scenarios.
+Each scenario will do one testing step.
+In one scenario, you can configure the type of scenario (operation), ``runner``
+type and ``sla`` of the scenario.
 
-Each scenario will do one testing step, In one scenario, you can configure the type of scenario(operation), runner type and SLA of the scenario.
+For TC002, We only have one step, which is Ping from host VM to target VM. In
+this step, we also have some detailed operations implemented (such as ssh to
+VM, ping from VM1 to VM2. Get the latency, verify the SLA, report the result).
 
-For TC002, We only have one step , that is Ping from host VM to target VM. In this step, we also have some detail operation implement ( such as ssh to VM, ping from VM1 to VM2. Get the latency, verify the SLA, report the result).
+If you want to get this implementation details implement, you can check with
+the scenario.py file. For Ping scenario, you can find it in Yardstick repo
+(``yardstick/yardstick/benchmark/scenarios/networking/ping.py``).
 
-If you want to get this detail implement , you can check with the scenario.py file. For Ping scenario, you can find it in yardstick repo ( yardstick / yardstick / benchmark / scenarios / networking / ping.py)
+After you select the type of scenario (such as Ping), you will select one type
+of ``runner``, there are 4 types of runner. ``Iteration`` and ``Duration`` are
+the most commonly used, and the default is ``Iteration``.
 
-after you select the type of scenario( such as Ping), you will select one type of runner, there are 4 types of runner. Usually, we use the "Iteration" and "Duration". and Default is "Iteration".
-For Iteration, you can specify the iteration number and interval of iteration. ::
+For ``Iteration``, you can specify the iteration number and interval of iteration. ::
 
   runner:
     type: Iteration
     iterations: 10
    interval: 1
 
-That means yardstick will iterate the 10 times of Ping test and the interval of each iteration is one second.
+That means Yardstick will repeat the Ping test 10 times and the interval of
+each iteration is one second.
 
-For Duration, you can specify the duration of this scenario and the interval of each ping test. ::
+For ``Duration``, you can specify the duration of this scenario and the
+interval of each ping test. ::
 
   runner:
     type: Duration
    duration: 60
    interval: 10
 
-That means yardstick will run the ping test as loop until the total time of this scenario reach the 60s and the interval of each loop is ten seconds.
-
+That means Yardstick will run the ping test as loop until the total time of
+this scenario reaches 60s and the interval of each loop is ten seconds.
 
-SLA is the criterion of this scenario. that depends on the scenario. different scenario can have different SLA metric.
+SLA is the criterion of this scenario. This depends on the scenario. Different
+scenarios can have different SLA metric.
 
-**How to write a new test case**
+How to write a new test case
+++++++++++++++++++++++++++++
 
-Yardstick already provide a library of testing step. that means yardstick provide lots of type scenario.
+Yardstick already provides a library of testing steps (i.e. different types of
+scenario).
 
-Basiclly, What you need to do is to orchestrate the scenario from the library.
+Basically, what you need to do is to orchestrate the scenario from the library.
 
-Here, We will show two cases. One is how to write a simple test case, the other is how to write a quite complex test case.
+Here, we will show two cases. One is how to write a simple test case, the other
+is how to write a quite complex test case.
 
 Write a new simple test case
+''''''''''''''''''''''''''''
 
 First, you can image a basic test case description as below.
 
@@ -314,7 +383,7 @@ First, you can image a basic test case description as below.
 
 TODO
 
 How can I contribute to Yardstick?
------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 If you are already a contributor of any OPNFV project, you can contribute to
 Yardstick. If you are totally new to OPNFV, you must first create your Linux
@@ -329,7 +398,7 @@ We distinguish 2 levels of contributors:
 
 Yardstick commitors are promoted by the Yardstick contributors.
 
 Gerrit & JIRA introduction
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+++++++++++++++++++++++++++
 
 .. _Gerrit: https://www.gerritcodereview.com/
 .. _`OPNFV Gerrit`: http://gerrit.opnfv.org/
@@ -338,7 +407,8 @@ Gerrit & JIRA introduction
 
 OPNFV uses Gerrit_ for web based code review and repository management for the
 Git Version Control System. You can access `OPNFV Gerrit`_. Please note that
-you need to have Linux Foundation ID in order to use OPNFV Gerrit. You can get one from this link_.
+you need to have Linux Foundation ID in order to use OPNFV Gerrit. You can get
+one from this link_.
 
 OPNFV uses JIRA_ for issue management. An important principle of change
 management is to have two-way trace-ability between issue management
@@ -350,14 +420,16 @@ If you want to contribute to Yardstick, you can pick a issue from
 Yardstick's JIRA dashboard or you can create you own issue and submit it to
 JIRA.
 
 Install Git and Git-reviews
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++++++++++++++++++++++++++++
 
 Installing and configuring Git and Git-Review is necessary in order to submit
-code to Gerrit. The `Getting to the code <https://wiki.opnfv.org/display/DEV/Developer+Getting+Started>`_ page will provide you with some help for that.
+code to Gerrit. The
+`Getting to the code <https://wiki.opnfv.org/display/DEV/Developer+Getting+Started>`_
+page will provide you with some help for that.
 
 
 Verify your patch locally before submitting
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++++++++++++++++++++++++++++++++++++++++++++
 
 Once you finish a patch, you can submit it to Gerrit for code review. A
 developer sends a new patch to Gerrit will trigger patch verify job on Jenkins
@@ -366,7 +438,8 @@ code coverage test. Before you submit your patch, it is recommended to run the
 patch verification in your local environment first.
 
 Open a terminal window and set the project's directory to the working
-directory using the ``cd`` command. Assume that ``YARDSTICK_REPO_DIR`` is the path to the Yardstick project folder on your computer::
+directory using the ``cd`` command. Assume that ``YARDSTICK_REPO_DIR`` is the
+path to the Yardstick project folder on your computer::
 
   cd $YARDSTICK_REPO_DIR
 
@@ -377,7 +450,7 @@ Verify your patch::
 
 It is used in CI but also by the CLI.
 
 Submit the code with Git
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+++++++++++++++++++++++++
 
 Tell Git which files you would like to take into account for the next commit.
 This is called 'staging' the files, by placing them into the staging area,
@@ -417,7 +490,7 @@ to the commits, and eventually navigate among the latter more easily.
 `This document`_ happened to be very clear and useful to get started with that.
 
 Push the code to Gerrit for review
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+++++++++++++++++++++++++++++++++++
 
 Now that the code has been comitted into your local Git repository the
 following step is to push it online to Gerrit for it to be reviewed. The
@@ -432,27 +505,27 @@ Yardstick committers and contributors to review your codes.
    :width: 800px
    :alt: Gerrit for code review
 
-You can find a list Yardstick people `here <https://wiki.opnfv.org/display/yardstick/People>`_,
-or use the ``yardstick-reviewers`` and ``yardstick-committers`` groups in gerrit.
+You can find a list Yardstick people
+`here <https://wiki.opnfv.org/display/yardstick/People>`_, or use the
+``yardstick-reviewers`` and ``yardstick-committers`` groups in gerrit.
 
 Modify the code under review in Gerrit
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+++++++++++++++++++++++++++++++++++++++
 
 At the same time the code is being reviewed in Gerrit, you may need to edit it
 to make some changes and then send it back for review. The following steps go
 through the procedure.
 
 Once you have modified/edited your code files under your IDE, you will have to
-stage them. The 'status' command is very helpful at this point as it provides
-an overview of Git's current state::
+stage them. The ``git status`` command is very helpful at this point as it
+provides an overview of Git's current state::
 
   git status
 
-The output of the command provides us with the files that have been modified
-after the latest commit.
+This command lists the files that have been modified since the last commit.
 
 You can now stage the files that have been modified as part of the Gerrit code
-review edition/modification/improvement using ``git add`` command. It is now
+review addition/modification/improvement using ``git add`` command. It is now
 time to commit the newly modified files, but the objective here is not to
 create a new commit, we simply want to inject the new changes into the
 previous commit. You can achieve that with the '--amend' option on the
@@ -469,7 +542,8 @@ The final step consists in pushing the newly modified commit to Gerrit::
 
 Plugins
-==========
+-------
 
-For information about Yardstick plugins, refer to the chapter **Installing a plug-in into Yardstick** in the `user guide`_.
+For information about Yardstick plugins, refer to the chapter
+**Installing a plug-in into Yardstick** in the `user guide`_.
diff --git a/nsb_setup.sh b/nsb_setup.sh
index 3396b82d1..1f5344980 100755
--- a/nsb_setup.sh
+++ b/nsb_setup.sh
@@ -63,7 +63,7 @@ for i in "${pkg[@]}"; do
     fi
 done
 
-pip install ansible==2.4.2 shade==1.22.2 docker-py==1.10.6
+pip install ansible==2.5.5 shade==1.22.2 docker-py==1.10.6
 
 ANSIBLE_SCRIPTS="ansible"
diff --git a/requirements.txt b/requirements.txt
index 60014d75e..43d7120db 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,7 +11,7 @@ Babel==2.3.4 # BSD; OSI Approved BSD License
 Jinja2==2.9.6 # BSD; OSI Approved BSD License
 SQLAlchemy==1.1.12 # MIT License; OSI Approved MIT License
 PTable==0.9.2 # BSD (3 clause); OSI Approved BSD License
-ansible==2.4.2 # GPLv3; OSI Approved GNU General Public License v3 or later (GPLv3+)
+ansible==2.5.5 # GPLv3; OSI Approved GNU General Public License v3 or later (GPLv3+)
 backport-ipaddress==0.1; python_version <= "2.7" # OSI Approved Python Software Foundation License
 chainmap==1.0.2 # Python Software Foundation License; OSI Approved Python Software Foundation License
 cmd2==0.8.6 # MIT License; OSI Approved MIT License
@@ -29,20 +29,20 @@ futures==3.1.1;python_version=='2.7' # BSD; OSI Approved BSD License
 influxdb==4.1.1 # MIT License; OSI Approved MIT License
 IxNetwork==8.40.1124.9 # MIT License; OSI Approved MIT License
 jinja2schema==0.1.4 # OSI Approved BSD License
-keystoneauth1==3.1.0 # OSI Approved Apache Software License
+keystoneauth1==3.3.0 # OSI Approved Apache Software License
 kubernetes==6.0.0 # OSI Approved Apache Software License
 mock==2.0.0 # OSI Approved BSD License; `BSD License`_; http://github.com/testing-cabal/mock/blob/master/LICENSE.txt
 msgpack-python==0.4.8 # OSI Approved Apache Software License
 netaddr==0.7.19 # BSD License; OSI Approved BSD License; OSI Approved MIT License
 netifaces==0.10.6 # MIT License; OSI Approved MIT License
 os-client-config==1.28.0 # OSI Approved Apache Software License
-osc-lib==1.7.0 # OSI Approved Apache Software License
-oslo.config==4.11.1 # OSI Approved Apache Software License
+osc-lib==1.8.0 # OSI Approved Apache Software License
+oslo.config==5.1.0 # OSI Approved Apache Software License
 oslo.i18n==3.17.0 # OSI Approved Apache Software License
-oslo.messaging===5.36.0 # OSI Approved Apache Software License
-oslo.privsep===1.22.1 # OSI Approved Apache Software License
+oslo.messaging==5.36.0 # OSI Approved Apache Software License
+oslo.privsep==1.23.0 # OSI Approved Apache Software License
 oslo.serialization==2.20.1 # OSI Approved Apache Software License
-oslo.utils==3.28.0 # OSI Approved Apache Software License
+oslo.utils==3.33.0 # OSI Approved Apache Software License
 paramiko==2.2.1 # LGPL; OSI Approved GNU Library or Lesser General Public License (LGPL)
 pbr==3.1.1 # OSI Approved Apache Software License; Apache License, Version 2.0
 pika==0.10.0 # BSD; OSI Approved BSD License
@@ -52,13 +52,13 @@ pycrypto==2.6.1 # Public Domain
 pyparsing==2.2.0 # MIT License; OSI Approved MIT License
 pyroute2==0.4.21 # dual license GPLv2+ and Apache v2; OSI Approved GNU General Public License v2 or later (GPLv2+); OSI Approved Apache Software License
 pyrsistent==0.14.1 # LICENSE.mit; OSI Approved MIT License
-python-cinderclient==3.1.0 # OSI Approved Apache Software License
+python-cinderclient==3.3.0 # OSI Approved Apache Software License
 python-glanceclient==2.8.0 # OSI Approved Apache Software License
 python-keystoneclient==3.13.0 # OSI Approved Apache Software License
 python-neutronclient==6.5.0 # OSI Approved Apache Software License
 python-novaclient==9.1.1 # OSI Approved Apache Software License
 pyzmq==16.0.2 # LGPL+BSD; OSI Approved GNU Library or Lesser General Public License (LGPL); OSI Approved BSD License
-requests==2.11.1 # Apache 2.0; OSI Approved Apache Software License
+requests==2.14.2 # Apache 2.0; OSI Approved Apache Software License
 requestsexceptions==1.3.0 # OSI Approved Apache Software License
 scp==0.10.2 # LGPL
 shade==1.22.2 # OSI Approved Apache Software License
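Besides the version bumps, this hunk also fixes two specifiers that used ``===`` (PEP 440 arbitrary equality, a literal string comparison) where plain ``==`` (version matching) was intended. The difference can be demonstrated with the third-party ``packaging`` library (shown here only for illustration)::

  from packaging.specifiers import SpecifierSet

  SpecifierSet('==5.36.0').contains('5.36')   # True: 5.36 == 5.36.0 as versions
  SpecifierSet('===5.36.0').contains('5.36')  # False: literal string comparison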
diff --git a/test-requirements.txt b/test-requirements.txt
index 4828e98b0..7825cc5d2 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -14,8 +14,8 @@ testtools==2.3.0 # OSI Approved MIT License
 unittest2==1.1.0 # OSI Approved BSD License
 
 # NOTE(ralonsoh): to be removed, only for coverage support
-python-heatclient==1.8.1 # OSI Approved Apache Software License
+python-heatclient==1.11.0 # OSI Approved Apache Software License
 
-# Yardstick F release <-> OpenStack Pike release
-openstack_requirements==1.1.0 # OSI Approved Apache Software License
--e git+https://github.com/openstack/requirements.git@stable/pike#egg=os_requirements
+# Yardstick G release <-> OpenStack Queens release
+openstack_requirements==1.2.0 # OSI Approved Apache Software License
+-e git+https://github.com/openstack/requirements.git@stable/queens#egg=os_requirements
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index e105c2f55..39336785e 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -25,6 +25,10 @@ class IXIARFC2544Profile(TrexProfile):
     UPLINK = 'uplink'
     DOWNLINK = 'downlink'
 
+    def __init__(self, yaml_data):
+        super(IXIARFC2544Profile, self).__init__(yaml_data)
+        self.rate = self.config.frame_rate
+
     def _get_ixia_traffic_profile(self, profile_data, mac=None):
         if mac is None:
             mac = {}
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py
index 02e7803f7..102c66f78 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py
@@ -20,7 +20,7 @@ import os
 import shutil
 from collections import OrderedDict
-from subprocess import call
+import subprocess
 
 from yardstick.common import utils
 from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
@@ -101,7 +101,7 @@ class IxLoadResourceHelper(ClientResourceHelper):
         LOG.debug(cmd)
 
         if not os.path.ismount(self.RESULTS_MOUNT):
-            call(cmd, shell=True)
+            subprocess.call(cmd, shell=True)
 
         shutil.rmtree(self.RESULTS_MOUNT, ignore_errors=True)
         utils.makedirs(self.RESULTS_MOUNT)
@@ -157,7 +157,7 @@ class IxLoadTrafficGen(SampleVNFTrafficGen):
                                args="'%s'" % ixload_config)
         LOG.debug(cmd)
-        call(cmd, shell=True)
+        subprocess.call(cmd, shell=True)
 
         with open(self.ssh_helper.join_bin_path("ixLoad_HTTP_Client.csv")) as csv_file:
             lines = csv_file.readlines()[10:]
@@ -172,5 +172,5 @@ class IxLoadTrafficGen(SampleVNFTrafficGen):
         self.resource_helper.data = self.resource_helper.make_aggregates()
 
     def terminate(self):
-        call(["pkill", "-9", "http_ixload.py"])
+        subprocess.call(["pkill", "-9", "http_ixload.py"])
        super(IxLoadTrafficGen, self).terminate()
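Switching from ``from subprocess import call`` to ``import subprocess`` matters for testability: a name imported with ``from ... import`` is a separate binding in the importing module, so patching ``subprocess.call`` would not affect it. With the module-level import, one patch covers every caller. A self-contained sketch (``mymodule`` is hypothetical)::

  import subprocess
  from unittest import mock

  # Suppose mymodule does 'import subprocess' and runs subprocess.call(cmd).
  # The name is resolved on the subprocess module at call time, so this
  # patch is visible to it:
  with mock.patch.object(subprocess, 'call', return_value=0) as mock_call:
      subprocess.call(['true'])
      mock_call.assert_called_once_with(['true'])

  # Had mymodule used 'from subprocess import call', the test would need
  # mock.patch.object(mymodule, 'call') to patch mymodule's own binding.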
call(["pkill", "-9", "http_ixload.py"]) + subprocess.call(["pkill", "-9", "http_ixload.py"]) super(IxLoadTrafficGen, self).terminate() diff --git a/yardstick/tests/unit/apiserver/utils/test_influx.py b/yardstick/tests/unit/apiserver/utils/test_influx.py index 95105d8ae..6021d35df 100644 --- a/yardstick/tests/unit/apiserver/utils/test_influx.py +++ b/yardstick/tests/unit/apiserver/utils/test_influx.py @@ -31,15 +31,17 @@ class GetDataDbClientTestCase(base.BaseUnitTestCase): _mock_parser.read.assert_called_once_with(constants.CONF_FILE) mock_get_client.assert_called_once_with(_mock_parser) + @mock.patch.object(influx.logger, 'error') @mock.patch.object(influx, '_get_influxdb_client', return_value='fake_client') @mock.patch.object(influx.ConfigParser, 'ConfigParser') - def test_get_data_db_client_parsing_error(self, mock_parser, - mock_get_client): + def test_get_data_db_client_parsing_error( + self, mock_parser, mock_get_client, *args): _mock_parser = mock.Mock() mock_parser.return_value = _mock_parser mock_parser.NoOptionError = configparser.NoOptionError - mock_get_client.side_effect = configparser.NoOptionError('option', 'section') + mock_get_client.side_effect = configparser.NoOptionError('option', + 'section') with self.assertRaises(configparser.NoOptionError): influx.get_data_db_client() diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py index 5be22a034..a4a8359d5 100644 --- a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py +++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py @@ -58,7 +58,10 @@ class OvsDpdkContextTestCase(unittest.TestCase): 'file': self._get_file_abspath(self.NODES_ovs_dpdk_SAMPLE) } self.ovs_dpdk = ovs_dpdk.OvsDpdkContext() + self._mock_log = mock.patch.object(ovs_dpdk, 'LOG') + self.mock_log = self._mock_log.start() self.addCleanup(self._remove_contexts) + self.addCleanup(self._stop_mocks) @staticmethod def _remove_contexts(): @@ -66,6 +69,9 @@ class OvsDpdkContextTestCase(unittest.TestCase): context._delete_context() base.Context.list = [] + def _stop_mocks(self): + self._mock_log.stop() + @mock.patch('yardstick.benchmark.contexts.standalone.model.Server') @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper') def test___init__(self, mock_helper, mock_server): diff --git a/yardstick/tests/unit/benchmark/contexts/test_node.py b/yardstick/tests/unit/benchmark/contexts/test_node.py index 5d7b24c3d..7fd13a406 100644 --- a/yardstick/tests/unit/benchmark/contexts/test_node.py +++ b/yardstick/tests/unit/benchmark/contexts/test_node.py @@ -170,7 +170,7 @@ class NodeContextTestCase(unittest.TestCase): def test__get_physical_nodes(self): self.test_context.init(self.attrs) nodes = self.test_context._get_physical_nodes() - self.assertEquals(nodes, self.test_context.nodes) + self.assertEqual(nodes, self.test_context.nodes) def test__get_physical_node_for_server(self): self.test_context.init(self.attrs) @@ -198,7 +198,7 @@ class NodeContextTestCase(unittest.TestCase): node_collectd_options = [node for node in self.test_context.nodes if node['name'] == 'node1'][0]['collectd'] - self.assertEquals(node_collectd_options, options) + self.assertEqual(node_collectd_options, options) @mock.patch('{}.NodeContext._dispatch_script'.format(PREFIX)) def test_deploy(self, dispatch_script_mock): diff --git a/yardstick/tests/unit/benchmark/core/test_plugin.py b/yardstick/tests/unit/benchmark/core/test_plugin.py index 
0d14e4e86..53621316b 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_node.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_node.py
@@ -170,7 +170,7 @@ class NodeContextTestCase(unittest.TestCase):
     def test__get_physical_nodes(self):
         self.test_context.init(self.attrs)
         nodes = self.test_context._get_physical_nodes()
-        self.assertEquals(nodes, self.test_context.nodes)
+        self.assertEqual(nodes, self.test_context.nodes)
 
     def test__get_physical_node_for_server(self):
         self.test_context.init(self.attrs)
@@ -198,7 +198,7 @@ class NodeContextTestCase(unittest.TestCase):
 
         node_collectd_options = [node for node in self.test_context.nodes
                                  if node['name'] == 'node1'][0]['collectd']
 
-        self.assertEquals(node_collectd_options, options)
+        self.assertEqual(node_collectd_options, options)
 
     @mock.patch('{}.NodeContext._dispatch_script'.format(PREFIX))
     def test_deploy(self, dispatch_script_mock):
diff --git a/yardstick/tests/unit/benchmark/core/test_plugin.py b/yardstick/tests/unit/benchmark/core/test_plugin.py
index 0d14e4e86..53621316b 100644
--- a/yardstick/tests/unit/benchmark/core/test_plugin.py
+++ b/yardstick/tests/unit/benchmark/core/test_plugin.py
@@ -12,6 +12,7 @@ import os
 import pkg_resources
 
 import mock
+import six
 import testtools
 
 from yardstick import ssh
@@ -48,13 +49,17 @@ deployment:
         self.mock_ssh_from_node.return_value = self.mock_ssh_obj
         self.mock_ssh_obj.wait = mock.Mock()
         self.mock_ssh_obj._put_file_shell = mock.Mock()
+        self._mock_log_info = mock.patch.object(plugin.LOG, 'info')
+        self.mock_log_info = self._mock_log_info.start()
         self.addCleanup(self._cleanup)
 
     def _cleanup(self):
         self._mock_ssh_from_node.stop()
+        self._mock_log_info.stop()
 
-    def test_install(self):
+    @mock.patch.object(six.moves.builtins, 'print')
+    def test_install(self, *args):
         args = mock.Mock()
         args.input_file = [mock.Mock()]
         with mock.patch.object(self.plugin, '_install_setup') as \
@@ -65,7 +70,8 @@ deployment:
                 PluginTestCase.DEPLOYMENT)
             mock_run.assert_called_once_with(PluginTestCase.NAME)
 
-    def test_remove(self):
+    @mock.patch.object(six.moves.builtins, 'print')
+    def test_remove(self, *args):
         args = mock.Mock()
         args.input_file = [mock.Mock()]
         with mock.patch.object(self.plugin, '_remove_setup') as \
diff --git a/yardstick/tests/unit/benchmark/runner/test_base.py b/yardstick/tests/unit/benchmark/runner/test_base.py
index 727207f5a..559c991f3 100644
--- a/yardstick/tests/unit/benchmark/runner/test_base.py
+++ b/yardstick/tests/unit/benchmark/runner/test_base.py
@@ -10,36 +10,40 @@
 import time
 
 import mock
-import unittest
-from subprocess import CalledProcessError
+import subprocess
 
-
-from yardstick.benchmark.runners import base
+from yardstick.benchmark.runners import base as runner_base
 from yardstick.benchmark.runners import iteration
+from yardstick.tests.unit import base as ut_base
 
 
-class ActionTestCase(unittest.TestCase):
+class ActionTestCase(ut_base.BaseUnitTestCase):
 
-    @mock.patch("yardstick.benchmark.runners.base.subprocess")
-    def test__execute_shell_command(self, mock_subprocess):
-        mock_subprocess.check_output.side_effect = CalledProcessError(-1, '')
+    def setUp(self):
+        self._mock_log = mock.patch.object(runner_base.log, 'error')
+        self.mock_log = self._mock_log.start()
+        self.addCleanup(self._stop_mocks)
 
-        self.assertEqual(base._execute_shell_command("")[0], -1)
+    def _stop_mocks(self):
+        self._mock_log.stop()
 
-    @mock.patch("yardstick.benchmark.runners.base.subprocess")
-    def test__single_action(self, mock_subprocess):
-        mock_subprocess.check_output.side_effect = CalledProcessError(-1, '')
+    @mock.patch.object(subprocess, 'check_output')
+    def test__execute_shell_command(self, mock_subprocess):
+        mock_subprocess.side_effect = subprocess.CalledProcessError(-1, '')
+        self.assertEqual(runner_base._execute_shell_command("")[0], -1)
 
-        base._single_action(0, "echo", mock.MagicMock())
+    @mock.patch.object(subprocess, 'check_output')
+    def test__single_action(self, mock_subprocess):
+        mock_subprocess.side_effect = subprocess.CalledProcessError(-1, '')
+        runner_base._single_action(0, 'echo', mock.Mock())
 
-    @mock.patch("yardstick.benchmark.runners.base.subprocess")
+    @mock.patch.object(subprocess, 'check_output')
     def test__periodic_action(self, mock_subprocess):
-        mock_subprocess.check_output.side_effect = CalledProcessError(-1, '')
-
-        base._periodic_action(0, "echo", mock.MagicMock())
+        mock_subprocess.side_effect = subprocess.CalledProcessError(-1, '')
+        runner_base._periodic_action(0, 'echo', mock.Mock())
 
 
-class RunnerTestCase(unittest.TestCase):
+class RunnerTestCase(ut_base.BaseUnitTestCase):
 
     def setUp(self):
         config = {
@@ -86,7 +90,7 @@ class RunnerTestCase(unittest.TestCase):
         self.assertEqual(idle_result, actual_result)
 
     def test__run_benchmark(self):
-        runner = base.Runner(mock.Mock())
+        runner = runner_base.Runner(mock.Mock())
 
         with self.assertRaises(NotImplementedError):
             runner._run_benchmark(mock.Mock(), mock.Mock(), mock.Mock(),
                                   mock.Mock())
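The reworked runner tests replace a blanket patch of the whole ``subprocess`` module with a targeted ``mock.patch.object(subprocess, 'check_output')`` whose ``side_effect`` raises ``CalledProcessError``, which drives the error path without spawning a real process. The idiom in isolation::

  import subprocess
  import unittest
  from unittest import mock

  class ShellErrorTestCase(unittest.TestCase):
      @mock.patch.object(subprocess, 'check_output')
      def test_failure_path(self, mock_check_output):
          # The mock raises exactly as a failing command would.
          mock_check_output.side_effect = (
              subprocess.CalledProcessError(-1, 'cmd'))
          with self.assertRaises(subprocess.CalledProcessError):
              subprocess.check_output('cmd', shell=True)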
diff --git a/yardstick/tests/unit/benchmark/runner/test_proxduration.py b/yardstick/tests/unit/benchmark/runner/test_proxduration.py
index be1715aad..3299c5b05 100644
--- a/yardstick/tests/unit/benchmark/runner/test_proxduration.py
+++ b/yardstick/tests/unit/benchmark/runner/test_proxduration.py
@@ -1,23 +1,29 @@
-##############################################################################
-# Copyright (c) 2018 Nokia and others.
+# Copyright (c) 2018 Intel Corporation
 #
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 import mock
 import unittest
 import multiprocessing
 import os
-import time
 
 from yardstick.benchmark.runners import proxduration
+from yardstick.common import constants
 from yardstick.common import exceptions as y_exc
 
 
 class ProxDurationRunnerTest(unittest.TestCase):
+
     class MyMethod(object):
         SLA_VALIDATION_ERROR_SIDE_EFFECT = 1
         BROAD_EXCEPTION_SIDE_EFFECT = 2
@@ -69,38 +75,37 @@ class ProxDurationRunnerTest(unittest.TestCase):
     @mock.patch.object(os, 'getpid')
     def test__worker_process_runner_id(self, mock_os_getpid):
         mock_os_getpid.return_value = 101
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
-        self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+        self.assertEqual(101, self.scenario_cfg['runner']['runner_id'])
 
     def test__worker_process_called_with_cfg(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
 
     def test__worker_process_called_with_cfg_loop(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.01}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
         self.assertGreater(self.benchmark.my_method.call_count, 2)
 
     def test__worker_process_called_without_cfg(self):
         scenario_cfg = {'runner': {}}
-
         aborted = multiprocessing.Event()
         aborted.set()
-
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                     scenario_cfg, {}, aborted, mock.Mock())
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', scenario_cfg, {},
+            aborted, mock.Mock())
 
         self.benchmark_cls.assert_called_once_with(scenario_cfg, {})
         self.benchmark.setup.assert_called_once()
@@ -108,188 +113,174 @@ class ProxDurationRunnerTest(unittest.TestCase):
 
     def test__worker_process_output_queue(self):
         self.benchmark.my_method = mock.Mock(return_value='my_result')
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        output_queue = multiprocessing.Queue()
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), output_queue)
-        time.sleep(0.1)
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        output_queue = mock.Mock()
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), output_queue)
 
         self._assert_defaults__worker_run_setup_and_teardown()
-        self.assertEquals(output_queue.get(), 'my_result')
+        output_queue.put.assert_has_calls(
+            [mock.call('my_result', True, constants.QUEUE_PUT_TIMEOUT)])
 
     def test__worker_process_output_queue_multiple_iterations(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = self.MyMethod()
-
-        output_queue = multiprocessing.Queue()
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), output_queue)
-        time.sleep(0.1)
+        output_queue = mock.Mock()
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), output_queue)
 
         self._assert_defaults__worker_run_setup_and_teardown()
-        self.assertGreater(self.benchmark.my_method.count, 103)
-
-        count = 101
-        while not output_queue.empty():
-            count += 1
-            self.assertEquals(output_queue.get(), count)
+        for idx in range(102, 101 + len(output_queue.method_calls)):
+            output_queue.put.assert_has_calls(
+                [mock.call(idx, True, constants.QUEUE_PUT_TIMEOUT)])
 
     def test__worker_process_queue(self):
         self.benchmark.my_method = self.MyMethod()
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        queue = multiprocessing.Queue()
-        timestamp = time.time()
-        proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
-        time.sleep(0.1)
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        queue = mock.Mock()
+        proxduration._worker_process(
+            queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {},
+            multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
-
-        result = queue.get()
-        self.assertGreater(result['timestamp'], timestamp)
-        self.assertEqual(result['errors'], '')
-        self.assertEqual(result['data'], {'my_key': 102})
-        self.assertEqual(result['sequence'], 1)
+        benchmark_output = {'timestamp': mock.ANY,
+                            'sequence': 1,
+                            'data': {'my_key': 102},
+                            'errors': ''}
+        queue.put.assert_has_calls(
+            [mock.call(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)])
 
     def test__worker_process_queue_multiple_iterations(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = self.MyMethod()
-
-        queue = multiprocessing.Queue()
-        timestamp = time.time()
-        proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
-        time.sleep(0.1)
+        queue = mock.Mock()
+        proxduration._worker_process(
+            queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {},
+            multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
-        self.assertGreater(self.benchmark.my_method.count, 103)
-
-        count = 0
-        while not queue.empty():
-            count += 1
-            result = queue.get()
-            self.assertGreater(result['timestamp'], timestamp)
-            self.assertEqual(result['errors'], '')
-            self.assertEqual(result['data'], {'my_key': count + 101})
-            self.assertEqual(result['sequence'], count)
+        for idx in range(102, 101 + len(queue.method_calls)):
+            benchmark_output = {'timestamp': mock.ANY,
+                                'sequence': idx - 101,
+                                'data': {'my_key': idx},
+                                'errors': ''}
+            queue.put.assert_has_calls(
+                [mock.call(benchmark_output, True,
+                           constants.QUEUE_PUT_TIMEOUT)])
 
     def test__worker_process_except_sla_validation_error_no_sla_cfg(self):
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.SLAValidationError)
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
 
-    def test__worker_process_except_sla_validation_error_sla_cfg_monitor(self):
+    @mock.patch.object(proxduration.LOG, 'warning')
+    def test__worker_process_except_sla_validation_error_sla_cfg_monitor(
+            self, *args):
         self.scenario_cfg['sla'] = {'action': 'monitor'}
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.SLAValidationError)
-
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
 
     def test__worker_process_raise_sla_validation_error_sla_cfg_default(self):
         self.scenario_cfg['sla'] = {}
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.SLAValidationError)
-
         with self.assertRaises(y_exc.SLAValidationError):
-            proxduration._worker_process(mock.Mock(), self.benchmark_cls,
-                                         'my_method', self.scenario_cfg, {},
-                                         multiprocessing.Event(), mock.Mock())
+            proxduration._worker_process(
+                mock.Mock(), self.benchmark_cls, 'my_method',
                self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock())
 
         self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
         self.benchmark.setup.assert_called_once()
         self.benchmark.my_method.assert_called_once_with({})
 
     def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.scenario_cfg['sla'] = {'action': 'assert'}
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.SLAValidationError)
 
         with self.assertRaises(y_exc.SLAValidationError):
-            proxduration._worker_process(mock.Mock(), self.benchmark_cls,
-                                         'my_method', self.scenario_cfg, {},
-                                         multiprocessing.Event(), mock.Mock())
+            proxduration._worker_process(
+                mock.Mock(), self.benchmark_cls, 'my_method',
+                self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock())
 
         self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
         self.benchmark.setup.assert_called_once()
         self.benchmark.my_method.assert_called_once_with({})
 
-    def test__worker_process_queue_on_sla_validation_error_monitor(self):
+    @mock.patch.object(proxduration.LOG, 'warning')
+    def test__worker_process_queue_on_sla_validation_error_monitor(
+            self, *args):
         self.scenario_cfg['sla'] = {'action': 'monitor'}
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = self.MyMethod(
             side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
-
-        queue = multiprocessing.Queue()
-        timestamp = time.time()
-        proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
-        time.sleep(0.1)
+        queue = mock.Mock()
+        proxduration._worker_process(
+            queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {},
+            multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
-
-        result = queue.get()
-        self.assertGreater(result['timestamp'], timestamp)
-        self.assertEqual(result['errors'], ('My Case SLA validation failed. '
-                                            'Error: my error message',))
-        self.assertEqual(result['data'], {'my_key': 102})
-        self.assertEqual(result['sequence'], 1)
-
-    def test__worker_process_broad_exception(self):
+        benchmark_output = {'timestamp': mock.ANY,
+                            'sequence': 1,
+                            'data': {'my_key': 102},
+                            'errors': ('My Case SLA validation failed. '
+                                       'Error: my error message', )}
+        queue.put.assert_has_calls(
+            [mock.call(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)])
+
+    @mock.patch.object(proxduration.LOG, 'exception')
+    def test__worker_process_broad_exception(self, *args):
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.YardstickException)
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method',
+            self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
 
-    def test__worker_process_queue_on_broad_exception(self):
+    @mock.patch.object(proxduration.LOG, 'exception')
+    def test__worker_process_queue_on_broad_exception(self, *args):
         self.benchmark.my_method = self.MyMethod(
             side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT)
-
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        queue = multiprocessing.Queue()
-        timestamp = time.time()
-        proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
-                                     self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
-        time.sleep(0.1)
-
-        self._assert_defaults__worker_run_setup_and_teardown()
-
-        result = queue.get()
-        self.assertGreater(result['timestamp'], timestamp)
-        self.assertNotEqual(result['errors'], '')
-        self.assertEqual(result['data'], {'my_key': 102})
-        self.assertEqual(result['sequence'], 1)
-
-    def test__worker_process_benchmark_teardown_on_broad_exception(self):
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        queue = mock.Mock()
+        proxduration._worker_process(
+            queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {},
+            multiprocessing.Event(), mock.Mock())
+
+        benchmark_output = {'timestamp': mock.ANY,
+                            'sequence': 1,
+                            'data': {'my_key': 102},
+                            'errors': mock.ANY}
+        queue.put.assert_has_calls(
+            [mock.call(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)])
+
+    @mock.patch.object(proxduration.LOG, 'exception')
+    def test__worker_process_benchmark_teardown_on_broad_exception(
+            self, *args):
         self.benchmark.teardown = mock.Mock(
             side_effect=y_exc.YardstickException)
-
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         with self.assertRaises(SystemExit) as raised:
-            proxduration._worker_process(mock.Mock(), self.benchmark_cls,
-                                         'my_method', self.scenario_cfg, {},
-                                         multiprocessing.Event(), mock.Mock())
+            proxduration._worker_process(
+                mock.Mock(), self.benchmark_cls, 'my_method',
                self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock())
-        self.assertEqual(raised.exception.code, 1)
+        self.assertEqual(1, raised.exception.code)
 
         self._assert_defaults__worker_run_setup_and_teardown()
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index cd065c961..dbf3d83b2 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -13,6 +13,7 @@ import unittest
 
 from yardstick.benchmark.scenarios.availability import scenario_general
 from yardstick.common import exceptions as y_exc
+
 
 class ScenarioGeneralTestCase(unittest.TestCase):
 
     @mock.patch.object(scenario_general, 'Director')
@@ -37,19 +38,21 @@ class ScenarioGeneralTestCase(unittest.TestCase):
                  'index': 2}]
             }
         }
-        self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None)
+        self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg,
+                                                         None)
         self.instance.setup()
         self.instance.director.verify.return_value = True
 
     def test_scenario_general_all_successful(self):
-
         ret = {}
         self.instance.run(ret)
         self.instance.teardown()
         self.assertEqual(ret['sla_pass'], 1)
 
-    def test_scenario_general_exception(self):
-        self.instance.director.createActionPlayer.side_effect = KeyError('Wrong')
+    @mock.patch.object(scenario_general.LOG, 'exception')
+    def test_scenario_general_exception(self, *args):
+        self.instance.director.createActionPlayer.side_effect = (
+            KeyError('Wrong'))
         self.instance.director.data = {}
         ret = {}
         self.instance.run(ret)
@mock.patch.object(scenario_general, 'Director') @@ -37,19 +38,21 @@ class ScenarioGeneralTestCase(unittest.TestCase): 'index': 2}] } } - self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None) + self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, + None) self.instance.setup() self.instance.director.verify.return_value = True def test_scenario_general_all_successful(self): - ret = {} self.instance.run(ret) self.instance.teardown() self.assertEqual(ret['sla_pass'], 1) - def test_scenario_general_exception(self): - self.instance.director.createActionPlayer.side_effect = KeyError('Wrong') + @mock.patch.object(scenario_general.LOG, 'exception') + def test_scenario_general_exception(self, *args): + self.instance.director.createActionPlayer.side_effect = ( + KeyError('Wrong')) self.instance.director.data = {} ret = {} self.instance.run(ret) diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py index 2190e9337..5f342df7d 100644 --- a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py +++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py @@ -7,10 +7,6 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -# Unittest for yardstick.benchmark.scenarios.networking.iperf3.Iperf - -from __future__ import absolute_import - import os import unittest @@ -22,7 +18,7 @@ from yardstick.benchmark.scenarios.networking import iperf3 from yardstick.common import exceptions as y_exc -@mock.patch('yardstick.benchmark.scenarios.networking.iperf3.ssh') +@mock.patch.object(iperf3, 'ssh') class IperfTestCase(unittest.TestCase): output_name_tcp = 'iperf3_sample_output.json' output_name_udp = 'iperf3_sample_output_udp.json' @@ -41,9 +37,14 @@ class IperfTestCase(unittest.TestCase): 'ipaddr': '172.16.0.138', } } + self._mock_log_info = mock.patch.object(iperf3.LOG, 'info') + self.mock_log_info = self._mock_log_info.start() + self.addCleanup(self._stop_mocks) - def test_iperf_successful_setup(self, mock_ssh): + def _stop_mocks(self): + self._mock_log_info.stop() + def test_iperf_successful_setup(self, mock_ssh): p = iperf3.Iperf({}, self.ctx) mock_ssh.SSH.from_node().execute.return_value = (0, '', '') @@ -53,13 +54,11 @@ class IperfTestCase(unittest.TestCase): mock_ssh.SSH.from_node().execute.assert_called_with("iperf3 -s -D") def test_iperf_unsuccessful_setup(self, mock_ssh): - p = iperf3.Iperf({}, self.ctx) mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR') self.assertRaises(RuntimeError, p.setup) def test_iperf_successful_teardown(self, mock_ssh): - p = iperf3.Iperf({}, self.ctx) mock_ssh.SSH.from_node().execute.return_value = (0, '', '') p.host = mock_ssh.SSH.from_node() @@ -70,7 +69,6 @@ class IperfTestCase(unittest.TestCase): mock_ssh.SSH.from_node().execute.assert_called_with("pkill iperf3") def test_iperf_successful_no_sla(self, mock_ssh): - options = {} args = {'options': options} result = {} @@ -86,7 +84,6 @@ class IperfTestCase(unittest.TestCase): self.assertEqual(result, expected_result) def test_iperf_successful_sla(self, mock_ssh): - options = {} args = { 'options': options, @@ -105,7 +102,6 @@ class IperfTestCase(unittest.TestCase): self.assertEqual(result, expected_result) def test_iperf_unsuccessful_sla(self, mock_ssh): - options = {} args = { 'options': options, @@ -174,7 +170,6 @@ class IperfTestCase(unittest.TestCase): self.assertEqual(result, 
expected_result) def test_iperf_unsuccessful_script_error(self, mock_ssh): - options = {} args = {'options': options} result = {} @@ -186,7 +181,8 @@ class IperfTestCase(unittest.TestCase): mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR') self.assertRaises(RuntimeError, p.run, result) - def _read_sample_output(self, filename): + @staticmethod + def _read_sample_output(filename): curr_path = os.path.dirname(os.path.abspath(__file__)) output = os.path.join(curr_path, filename) with open(output) as f: diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py index c05d2ced2..db6f9cc89 100644 --- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py +++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py @@ -18,8 +18,8 @@ import time import mock import unittest -from yardstick.benchmark.scenarios.networking import vsperf_dpdk from yardstick import exceptions as y_exc +from yardstick.benchmark.scenarios.networking import vsperf_dpdk class VsperfDPDKTestCase(unittest.TestCase): @@ -59,17 +59,18 @@ class VsperfDPDKTestCase(unittest.TestCase): self.scenario = vsperf_dpdk.VsperfDPDK(self.args, self.ctx) - self._mock_ssh = mock.patch( - 'yardstick.benchmark.scenarios.networking.vsperf_dpdk.ssh') + self._mock_ssh = mock.patch.object(vsperf_dpdk, 'ssh') self.mock_ssh = self._mock_ssh.start() self._mock_subprocess_call = mock.patch.object(subprocess, 'call') self.mock_subprocess_call = self._mock_subprocess_call.start() - + self._mock_log_info = mock.patch.object(vsperf_dpdk.LOG, 'info') + self.mock_log_info = self._mock_log_info.start() self.addCleanup(self._cleanup) def _cleanup(self): self._mock_ssh.stop() self._mock_subprocess_call.stop() + self._mock_log_info.stop() def test_setup(self): # setup() specific mocks diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py index 6b3532fa2..3bb8b9192 100644 --- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
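The iperf3 and vsperf_dpdk setUp() methods above (and the prox_binsearch one further down) install a logger patcher by hand and register a cleanup hook, instead of decorating every test method. A minimal, self-contained sketch of that pattern follows; the module-level LOG is a stand-in for a scenario module's logger such as iperf3.LOG, and "import mock" is the standalone mock package these tests already use (unittest.mock behaves the same):

    import logging
    import unittest

    import mock

    LOG = logging.getLogger(__name__)  # stand-in for e.g. iperf3.LOG


    class ExampleLogPatchTestCase(unittest.TestCase):

        def setUp(self):
            # Start the patcher once per test: every test method then runs
            # with LOG.info silenced but still recordable for assertions.
            self._mock_log_info = mock.patch.object(LOG, 'info')
            self.mock_log_info = self._mock_log_info.start()
            # addCleanup() fires even if a later setUp step or the test
            # itself raises, unlike a stop() placed only in tearDown().
            self.addCleanup(self._stop_mocks)

        def _stop_mocks(self):
            self._mock_log_info.stop()

        def test_info_is_mocked(self):
            LOG.info('recorded, not printed')
            self.mock_log_info.assert_called_once_with(
                'recorded, not printed')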
-from copy import deepcopy +import copy import mock import unittest @@ -440,6 +440,12 @@ class TestIXIARFC2544Profile(unittest.TestCase): result = r_f_c2544_profile._get_ixia_traffic_profile(profile_data, mac) self.assertIsNotNone(result) + def test__init__(self): + t_profile_data = copy.deepcopy(self.TRAFFIC_PROFILE) + t_profile_data['traffic_profile']['frame_rate'] = 12345678 + r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(t_profile_data) + self.assertEqual(12345678, r_f_c2544_profile.rate) + def test__get_ixia_traffic_profile_default_args(self): r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile( self.TRAFFIC_PROFILE) @@ -521,7 +527,7 @@ class TestIXIARFC2544Profile(unittest.TestCase): traffic_generator.vnfd_helper.port_num.side_effect = ports_expected traffic_generator.client.return_value = True - traffic_profile = deepcopy(self.TRAFFIC_PROFILE) + traffic_profile = copy.deepcopy(self.TRAFFIC_PROFILE) traffic_profile.update({ "uplink_0": ["xe0"], "downlink_0": ["xe1", "xe2"], diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py index 036746e6b..c062308e8 100644 --- a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py @@ -11,23 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# import unittest import mock -from yardstick.tests import STL_MOCKS +from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxTestDataTuple +from yardstick.network_services.traffic_profile import prox_binsearch -STLClient = mock.MagicMock() -stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) -stl_patch.start() -if stl_patch: - from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxTestDataTuple - from yardstick.network_services.traffic_profile.prox_binsearch import ProxBinSearchProfile +class TestProxBinSearchProfile(unittest.TestCase): + def setUp(self): + self._mock_log_info = mock.patch.object(prox_binsearch.LOG, 'info') + self.mock_log_info = self._mock_log_info.start() + self.addCleanup(self._stop_mocks) -class TestProxBinSearchProfile(unittest.TestCase): + def _stop_mocks(self): + self._mock_log_info.stop() def test_execute_1(self): def target(*args, **_): @@ -60,7 +60,7 @@ class TestProxBinSearchProfile(unittest.TestCase): profile_helper = mock.MagicMock() profile_helper.run_test = target - profile = ProxBinSearchProfile(tp_config) + profile = prox_binsearch.ProxBinSearchProfile(tp_config) profile.init(mock.MagicMock()) profile._profile_helper = profile_helper @@ -138,7 +138,7 @@ class TestProxBinSearchProfile(unittest.TestCase): profile_helper = mock.MagicMock() profile_helper.run_test = target - profile = ProxBinSearchProfile(tp_config) + profile = prox_binsearch.ProxBinSearchProfile(tp_config) profile.init(mock.MagicMock()) profile._profile_helper = profile_helper @@ -173,7 +173,7 @@ class TestProxBinSearchProfile(unittest.TestCase): profile_helper = mock.MagicMock() profile_helper.run_test = target - profile = ProxBinSearchProfile(tp_config) + profile = prox_binsearch.ProxBinSearchProfile(tp_config) profile.init(mock.MagicMock()) profile._profile_helper = profile_helper @@ -227,7 +227,7 @@ class TestProxBinSearchProfile(unittest.TestCase): profile_helper = mock.MagicMock() profile_helper.run_test = target 
- profile = ProxBinSearchProfile(tp_config) + profile = prox_binsearch.ProxBinSearchProfile(tp_config) profile.init(mock.MagicMock()) profile._profile_helper = profile_helper diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py index 6858cf70a..66f9e93ae 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# import subprocess @@ -20,19 +19,10 @@ import six import unittest from yardstick import ssh -from yardstick.common import utils -from yardstick.tests import STL_MOCKS from yardstick.benchmark.contexts import base as ctx_base - - -STLClient = mock.MagicMock() -stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) -stl_patch.start() - -if stl_patch: - from yardstick.network_services.vnf_generic.vnf.tg_ixload import IxLoadTrafficGen - from yardstick.network_services.vnf_generic.vnf.tg_ixload import IxLoadResourceHelper - from yardstick.network_services.traffic_profile.base import TrafficProfile +from yardstick.common import utils +from yardstick.network_services.vnf_generic.vnf import tg_ixload +from yardstick.network_services.traffic_profile.base import TrafficProfile NAME = "tg__1" @@ -119,167 +109,128 @@ class TestIxLoadTrafficGen(unittest.TestCase): def setUp(self): self._mock_call = mock.patch.object(subprocess, "call") self.mock_call = self._mock_call.start() - self._mock_open = mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open") + self._mock_open = mock.patch.object(tg_ixload, 'open') self.mock_open = self._mock_open.start() - self.addCleanup(self._stop_mock) def _stop_mock(self): self._mock_call.stop() self._mock_open.stop() - def test___init__(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) - self.assertIsNone(ixload_traffic_gen.resource_helper.data) + @mock.patch.object(ssh, 'SSH') + def test___init__(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + self.assertIsNone(ixload_traffic_gen.resource_helper.data) - @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', + return_value='mock_node') + @mock.patch.object(ssh, 'SSH') def test_collect_kpi(self, *args): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen.scenario_helper.scenario_cfg = { + 'nodes': {ixload_traffic_gen.name: "mock"} + } + ixload_traffic_gen.data = {} + result = ixload_traffic_gen.collect_kpi() - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) - ixload_traffic_gen.scenario_helper.scenario_cfg = { - 'nodes': {ixload_traffic_gen.name: 
"mock"} - } - ixload_traffic_gen.data = {} - restult = ixload_traffic_gen.collect_kpi() - expected = { - 'physical_node': 'mock_node', - 'collect_stats': {}, - } - self.assertEqual(expected, restult) + expected = { + 'physical_node': 'mock_node', + 'collect_stats': {}} + self.assertEqual(expected, result) - def test_listen_traffic(self): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) - self.assertIsNone(ixload_traffic_gen.listen_traffic({})) + @mock.patch.object(ssh, 'SSH') + def test_listen_traffic(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + self.assertIsNone(ixload_traffic_gen.listen_traffic({})) @mock.patch.object(utils, 'find_relative_file') @mock.patch.object(utils, 'makedirs') - @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call") @mock.patch.object(ctx_base.Context, 'get_context_from_server') @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil") - def test_instantiate(self, shutil, *args): - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh_mock.run = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) - scenario_cfg = {'tc': "nsb_test_case", - 'ixia_profile': "ixload.cfg", - 'task_path': "/path/to/task"} - ixload_traffic_gen.RESULTS_MOUNT = "/tmp/result" - shutil.copy = mock.Mock() - scenario_cfg.update({'options': {'packetsize': 64, 'traffic_type': 4, - 'rfc2544': {'allowed_drop_rate': '0.8 - 1'}, - 'vnf__1': {'rules': 'acl_1rule.yaml', - 'vnf_config': {'lb_config': 'SW', - 'lb_count': 1, - 'worker_config': - '1C/1T', - 'worker_threads': 1}} - }}) - scenario_cfg.update({ - 'nodes': {ixload_traffic_gen.name: "mock"} - }) - with mock.patch.object(six.moves.builtins, 'open', - create=True) as mock_open: - mock_open.return_value = mock.MagicMock() - ixload_traffic_gen.instantiate(scenario_cfg, {}) + @mock.patch.object(ssh, 'SSH') + def test_instantiate(self, mock_shutil, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + scenario_cfg = {'tc': "nsb_test_case", + 'ixia_profile': "ixload.cfg", + 'task_path': "/path/to/task"} + ixload_traffic_gen.RESULTS_MOUNT = "/tmp/result" + mock_shutil.copy = mock.Mock() + scenario_cfg.update({'options': {'packetsize': 64, 'traffic_type': 4, + 'rfc2544': {'allowed_drop_rate': '0.8 - 1'}, + 'vnf__1': {'rules': 'acl_1rule.yaml', + 'vnf_config': {'lb_config': 'SW', + 'lb_count': 1, + 'worker_config': + '1C/1T', + 'worker_threads': 1}} + }}) + scenario_cfg.update({ + 'nodes': {ixload_traffic_gen.name: "mock"} + }) + with mock.patch.object(six.moves.builtins, 'open', + create=True) as mock_open: + mock_open.return_value = mock.MagicMock() + ixload_traffic_gen.instantiate(scenario_cfg, {}) - @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.min") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max") 
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil") - def test_run_traffic(self, shutil, *args): + @mock.patch.object(ssh, 'SSH') + def test_run_traffic(self, *args): mock_traffic_profile = mock.Mock(autospec=TrafficProfile) mock_traffic_profile.get_traffic_definition.return_value = "64" mock_traffic_profile.params = self.TRAFFIC_PROFILE - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh_mock.run = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vnfd["mgmt-interface"].update({"tg-config": {}}) - vnfd["mgmt-interface"]["tg-config"].update({"ixchassis": - "1.1.1.1"}) - vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path": - "/root"}) - sut = IxLoadTrafficGen(NAME, vnfd) - sut.connection = mock.Mock() - sut.connection.run = mock.Mock() - sut._traffic_runner = mock.Mock(return_value=0) - shutil.copy = mock.Mock() - result = sut.run_traffic(mock_traffic_profile) - self.assertIsNone(result) + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vnfd["mgmt-interface"].update({"tg-config": {}}) + vnfd["mgmt-interface"]["tg-config"].update({"ixchassis": + "1.1.1.1"}) + vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path": + "/root"}) + sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd) + sut.connection = mock.Mock() + sut.connection.run = mock.Mock() + sut._traffic_runner = mock.Mock(return_value=0) + result = sut.run_traffic(mock_traffic_profile) + self.assertIsNone(result) - @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.min") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len") @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil") - def test_run_traffic_csv(self, shutil, *args): + @mock.patch.object(ssh, 'SSH') + def test_run_traffic_csv(self, *args): mock_traffic_profile = mock.Mock(autospec=TrafficProfile) mock_traffic_profile.get_traffic_definition.return_value = "64" mock_traffic_profile.params = self.TRAFFIC_PROFILE - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "", "")) - ssh_mock.run = \ - mock.Mock(return_value=(0, "", "")) - ssh.from_node.return_value = ssh_mock - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vnfd["mgmt-interface"].update({"tg-config": {}}) - vnfd["mgmt-interface"]["tg-config"].update({"ixchassis": - "1.1.1.1"}) - vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path": - "/root"}) - sut = IxLoadTrafficGen(NAME, vnfd) - sut.connection = mock.Mock() - sut.connection.run = mock.Mock() - sut._traffic_runner = mock.Mock(return_value=0) - shutil.copy = mock.Mock() - subprocess.call(["touch", "/tmp/1.csv"]) - sut.rel_bin_path = mock.Mock(return_value="/tmp/*.csv") - result = sut.run_traffic(mock_traffic_profile) - self.assertIsNone(result) + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vnfd["mgmt-interface"].update({"tg-config": {}}) + vnfd["mgmt-interface"]["tg-config"].update({"ixchassis": + "1.1.1.1"}) + vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path": + "/root"}) + sut = tg_ixload.IxLoadTrafficGen(NAME, 
vnfd) + sut.connection = mock.Mock() + sut.connection.run = mock.Mock() + sut._traffic_runner = mock.Mock(return_value=0) + sut.rel_bin_path = mock.Mock(return_value="/tmp/*.csv") + result = sut.run_traffic(mock_traffic_profile) + self.assertIsNone(result) - @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call") @mock.patch.object(ssh, 'SSH') def test_terminate(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) self.assertIsNone(ixload_traffic_gen.terminate()) - @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call") @mock.patch.object(ssh, 'SSH') - def test_parse_csv_read(self, mock_ssh, *args): + def test_parse_csv_read(self, mock_ssh): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] kpi_data = { 'HTTP Total Throughput (Kbps)': 1, @@ -294,14 +245,14 @@ class TestIxLoadTrafficGen(unittest.TestCase): mock_ssh_type.execute.return_value = 0, "", "" mock_ssh.from_node.return_value = mock_ssh_type - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) result = ixload_traffic_gen.resource_helper.result ixload_traffic_gen.resource_helper.parse_csv_read(http_reader) - for key_left, key_right in IxLoadResourceHelper.KPI_LIST.items(): + for key_left, key_right in ( + tg_ixload.IxLoadResourceHelper.KPI_LIST.items()): self.assertEqual(result[key_left][-1], int(kpi_data[key_right])) - @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call") @mock.patch.object(ssh, 'SSH') def test_parse_csv_read_value_error(self, mock_ssh, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] @@ -317,13 +268,12 @@ class TestIxLoadTrafficGen(unittest.TestCase): mock_ssh_type.execute.return_value = 0, "", "" mock_ssh.from_node.return_value = mock_ssh_type - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) init_value = ixload_traffic_gen.resource_helper.result ixload_traffic_gen.resource_helper.parse_csv_read(http_reader) self.assertDictEqual(ixload_traffic_gen.resource_helper.result, init_value) - @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call") @mock.patch.object(ssh, 'SSH') def test_parse_csv_read_error(self, mock_ssh, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] @@ -338,7 +288,7 @@ class TestIxLoadTrafficGen(unittest.TestCase): mock_ssh_type.execute.return_value = 0, "", "" mock_ssh.from_node.return_value = mock_ssh_type - ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) with self.assertRaises(KeyError): ixload_traffic_gen.resource_helper.parse_csv_read(http_reader) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py index f91852f74..c1664f2f0 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py @@ -11,35 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
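Both the tg_ixload tests above and the vpe_vnf tests below drop the old STL_MOCKS trick (starting a mock.patch.dict("sys.modules", ...) at import time and importing the code under test inside an "if stl_patch:" block) in favor of plain top-level imports plus mock.patch.object decorators that take the module object itself. A small runnable sketch of the decorator form; subprocess.call is used here purely as a convenient patch target (the vsperf_dpdk setUp above patches the same function):

    import subprocess
    import unittest

    import mock


    class ExamplePatchObjectTestCase(unittest.TestCase):

        # patch.object(module, 'name') references an already-imported
        # module object, so the patched attribute is exactly the one the
        # code under test will look up; there is no dotted-string path to
        # fall out of date when a module is moved or renamed.
        @mock.patch.object(subprocess, 'call')
        def test_call_is_mocked(self, mock_call):
            mock_call.return_value = 0
            self.assertEqual(0, subprocess.call(['true']))
            mock_call.assert_called_once_with(['true'])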
-# from multiprocessing import Process, Queue -import os import time import mock from six.moves import configparser import unittest -from yardstick.tests import STL_MOCKS -from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import FileAbsPath -from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh -from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper -from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper from yardstick.benchmark.contexts import base as ctx_base - - -SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper' - -STLClient = mock.MagicMock() -stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) -stl_patch.start() - -if stl_patch: - from yardstick.network_services.vnf_generic.vnf.vpe_vnf import ConfigCreate - from yardstick.network_services.nfvi.resource import ResourceProfile - from yardstick.network_services.vnf_generic.vnf.vpe_vnf import \ - VpeApproxVnf, VpeApproxSetupEnvHelper +from yardstick.network_services.nfvi.resource import ResourceProfile +from yardstick.network_services.vnf_generic.vnf import base as vnf_base +from yardstick.network_services.vnf_generic.vnf import sample_vnf +from yardstick.network_services.vnf_generic.vnf import vpe_vnf +from yardstick.tests.unit.network_services.vnf_generic.vnf import test_base TEST_FILE_YAML = 'nsb_test_case.yaml' @@ -48,7 +33,7 @@ NAME = 'vnf_1' PING_OUTPUT_1 = "Pkts in: 101\r\n\tPkts dropped by AH: 100\r\n\tPkts dropped by other: 100" -MODULE_PATH = FileAbsPath(__file__) +MODULE_PATH = test_base.FileAbsPath(__file__) get_file_abspath = MODULE_PATH.get_path @@ -156,20 +141,20 @@ class TestConfigCreate(unittest.TestCase): } def test___init__(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - config_create = ConfigCreate(vnfd_helper, 2) + vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) + config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2) self.assertEqual(config_create.uplink_ports, ['xe0']) self.assertEqual(config_create.downlink_ports, ['xe1']) self.assertEqual(config_create.socket, 2) def test_dpdk_port_to_link_id(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - config_create = ConfigCreate(vnfd_helper, 2) + vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) + config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2) self.assertEqual(config_create.dpdk_port_to_link_id_map, {'xe0': 0, 'xe1': 1}) def test_vpe_initialize(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - config_create = ConfigCreate(vnfd_helper, 2) + vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) + config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2) config = configparser.ConfigParser() config_create.vpe_initialize(config) self.assertEqual(config.get('EAL', 'log_level'), '0') @@ -179,16 +164,16 @@ class TestConfigCreate(unittest.TestCase): self.assertEqual(config.get('MEMPOOL1', 'pool_size'), '2M') def test_vpe_rxq(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - config_create = ConfigCreate(vnfd_helper, 2) + vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) + config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2) config = configparser.ConfigParser() config_create.downlink_ports = ['xe0'] config_create.vpe_rxq(config) self.assertEqual(config.get('RXQ0.0', 'mempool'), 'MEMPOOL1') def test_get_sink_swq(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - config_create = ConfigCreate(vnfd_helper, 2) + vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) + config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2) config = configparser.ConfigParser() config.add_section('PIPELINE0') 
config.set('PIPELINE0', 'key1', 'value1') @@ -205,8 +190,8 @@ class TestConfigCreate(unittest.TestCase): self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key5', 5), 'SWQ0 SINK1') def test_generate_vpe_script(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - vpe_config_vnf = ConfigCreate(vnfd_helper, 2) + vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) + vpe_config_vnf = vpe_vnf.ConfigCreate(vnfd_helper, 2) intf = [ { "name": 'xe1', @@ -230,15 +215,34 @@ class TestConfigCreate(unittest.TestCase): self.assertNotEqual(result, '') def test_create_vpe_config(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - config_create = ConfigCreate(vnfd_helper, 23) - config_create.downlink_ports = ['xe1'] + vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) + config_create = vpe_vnf.ConfigCreate(vnfd_helper, 23) config_create.uplink_ports = ['xe1'] - curr_path = os.path.dirname(os.path.abspath(__file__)) - vpe_cfg = "samples/vnf_samples/nsut/vpe/vpe_config" - vnf_cfg = os.path.join(curr_path, "../../../../..", vpe_cfg) - config_create.create_vpe_config(vnf_cfg) - os.system("git checkout -- %s" % vnf_cfg) + with mock.patch.object(config_create, 'vpe_upstream') as mock_up, \ + mock.patch.object(config_create, 'vpe_downstream') as \ + mock_down, \ + mock.patch.object(config_create, 'vpe_tmq') as mock_tmq, \ + mock.patch.object(config_create, 'vpe_initialize') as \ + mock_ini, \ + mock.patch.object(config_create, 'vpe_rxq') as mock_rxq: + mock_ini_obj = mock.Mock() + mock_rxq_obj = mock.Mock() + mock_up_obj = mock.Mock() + mock_down_obj = mock.Mock() + mock_tmq_obj = mock.Mock() + mock_ini.return_value = mock_ini_obj + mock_rxq.return_value = mock_rxq_obj + mock_up.return_value = mock_up_obj + mock_down.return_value = mock_down_obj + mock_tmq.return_value = mock_tmq_obj + config_create.create_vpe_config('fake_config_file') + + mock_rxq.assert_called_once_with(mock_ini_obj) + mock_up.assert_called_once_with('fake_config_file', 0) + mock_down.assert_called_once_with('fake_config_file', 0) + mock_tmq.assert_called_once_with(mock_down_obj, 0) + mock_up_obj.write.assert_called_once() + mock_tmq_obj.write.assert_called_once() class TestVpeApproxVnf(unittest.TestCase): @@ -410,7 +414,7 @@ class TestVpeApproxVnf(unittest.TestCase): 'interfaces': { 'xe0': { 'local_iface_name': 'ens513f0', - 'vld_id': VpeApproxVnf.DOWNLINK, + 'vld_id': vpe_vnf.VpeApproxVnf.DOWNLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.40.20', 'dst_mac': '00:00:00:00:00:01', @@ -444,7 +448,7 @@ class TestVpeApproxVnf(unittest.TestCase): 'interfaces': { 'xe0': { 'local_iface_name': 'ens785f0', - 'vld_id': VpeApproxVnf.UPLINK, + 'vld_id': vpe_vnf.VpeApproxVnf.UPLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.100.20', 'dst_mac': '00:00:00:00:00:02', @@ -475,7 +479,7 @@ class TestVpeApproxVnf(unittest.TestCase): 'interfaces': { 'xe0': { 'local_iface_name': 'ens786f0', - 'vld_id': VpeApproxVnf.UPLINK, + 'vld_id': vpe_vnf.VpeApproxVnf.UPLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.100.19', 'dst_mac': '00:00:00:00:00:04', @@ -487,7 +491,7 @@ class TestVpeApproxVnf(unittest.TestCase): }, 'xe1': { 'local_iface_name': 'ens786f1', - 'vld_id': VpeApproxVnf.DOWNLINK, + 'vld_id': vpe_vnf.VpeApproxVnf.DOWNLINK, 'netmask': '255.255.255.0', 'local_ip': '152.16.40.19', 'dst_mac': '00:00:00:00:00:03', @@ -545,20 +549,21 @@ class TestVpeApproxVnf(unittest.TestCase): self._mock_time_sleep.stop() def test___init__(self): - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) 
self.assertIsNone(vpe_approx_vnf._vnf_process) - @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') - @mock.patch(SSH_HELPER) + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', + return_value='mock_node') + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_collect_kpi_sa_not_running(self, ssh, *args): - mock_ssh(ssh) + test_base.mock_ssh(ssh) resource = mock.Mock(autospec=ResourceProfile) resource.check_if_system_agent_running.return_value = 1, '' resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234} resource.check_if_system_agent_running.return_value = (1, None) - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {vpe_approx_vnf.name: "mock"} } @@ -577,16 +582,17 @@ class TestVpeApproxVnf(unittest.TestCase): } self.assertEqual(vpe_approx_vnf.collect_kpi(), expected) - @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') - @mock.patch(SSH_HELPER) + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', + return_value='mock_node') + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_collect_kpi_sa_running(self, ssh, *args): - mock_ssh(ssh) + test_base.mock_ssh(ssh) resource = mock.Mock(autospec=ResourceProfile) resource.check_if_system_agent_running.return_value = 0, '1234' resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234} - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {vpe_approx_vnf.name: "mock"} } @@ -605,20 +611,20 @@ class TestVpeApproxVnf(unittest.TestCase): } self.assertEqual(vpe_approx_vnf.collect_kpi(), expected) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_vnf_execute(self, ssh): - mock_ssh(ssh) - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + test_base.mock_ssh(ssh) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf.q_in = mock.MagicMock() vpe_approx_vnf.q_out = mock.MagicMock() vpe_approx_vnf.q_out.qsize = mock.Mock(return_value=0) self.assertEqual(vpe_approx_vnf.vnf_execute("quit", 0), '') - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_run_vpe(self, ssh): - mock_ssh(ssh) + test_base.mock_ssh(ssh) - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML) vpe_approx_vnf.vnf_cfg = { 'lb_config': 'SW', @@ -647,11 +653,11 @@ class TestVpeApproxVnf(unittest.TestCase): @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig") @mock.patch("yardstick.network_services.vnf_generic.vnf.vpe_vnf.ConfigCreate") @mock.patch("six.moves.builtins.open") - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_build_config(self, ssh, *args): - mock_ssh(ssh) - vpe_approx_vnf = VpeApproxSetupEnvHelper(mock.MagicMock(), - mock.MagicMock(), mock.MagicMock()) + test_base.mock_ssh(ssh) + vpe_approx_vnf = vpe_vnf.VpeApproxSetupEnvHelper( + mock.MagicMock(), mock.MagicMock(), mock.MagicMock()) vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML) vpe_approx_vnf.generate_port_pairs = mock.Mock() vpe_approx_vnf.vnf_cfg = { @@ -687,9 +693,9 @@ class TestVpeApproxVnf(unittest.TestCase): expected = 'sudo tool_path -p 0x3 -f /tmp/vpe_config -s /tmp/vpe_script 
--hwlb 3' self.assertEqual(vpe_approx_vnf.build_config(), expected) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_wait_for_instantiate(self, ssh): - mock_ssh(ssh) + test_base.mock_ssh(ssh) mock_process = mock.Mock(autospec=Process) mock_process.is_alive.return_value = True @@ -701,18 +707,19 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.q_out = mock_q_out - vpe_approx_vnf.queue_wrapper = mock.Mock(autospec=QueueFileWrapper) + vpe_approx_vnf.queue_wrapper = mock.Mock( + autospec=vnf_base.QueueFileWrapper) vpe_approx_vnf.resource_helper.resource = mock_resource vpe_approx_vnf.q_out.put("pipeline>") self.assertEqual(vpe_approx_vnf.wait_for_instantiate(), 432) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_wait_for_instantiate_fragmented(self, ssh): - mock_ssh(ssh) + test_base.mock_ssh(ssh) mock_process = mock.Mock(autospec=Process) mock_process.is_alive.return_value = True @@ -725,17 +732,18 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.q_out = mock_q_out - vpe_approx_vnf.queue_wrapper = mock.Mock(autospec=QueueFileWrapper) + vpe_approx_vnf.queue_wrapper = mock.Mock( + autospec=vnf_base.QueueFileWrapper) vpe_approx_vnf.resource_helper.resource = mock_resource self.assertEqual(vpe_approx_vnf.wait_for_instantiate(), 432) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_wait_for_instantiate_crash(self, ssh): - mock_ssh(ssh, exec_result=(1, "", "")) + test_base.mock_ssh(ssh, exec_result=(1, "", "")) mock_process = mock.Mock(autospec=Process) mock_process.is_alive.return_value = False @@ -743,7 +751,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.resource_helper.resource = mock_resource @@ -752,9 +760,9 @@ class TestVpeApproxVnf(unittest.TestCase): self.assertIn('VNF process died', str(raised.exception)) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_wait_for_instantiate_panic(self, ssh): - mock_ssh(ssh, exec_result=(1, "", "")) + test_base.mock_ssh(ssh, exec_result=(1, "", "")) mock_process = mock.Mock(autospec=Process) mock_process.is_alive.return_value = True @@ -762,7 +770,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.resource_helper.resource = mock_resource @@ -772,9 +780,9 @@ class TestVpeApproxVnf(unittest.TestCase): self.assertIn('Error starting', str(raised.exception)) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_wait_for_instantiate_panic_fragmented(self, ssh): - mock_ssh(ssh, exec_result=(1, "", "")) + test_base.mock_ssh(ssh, exec_result=(1, "", "")) mock_process = mock.Mock(autospec=Process) mock_process.is_alive.return_value = True @@ -787,7 +795,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = 
mock.MagicMock() - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.q_out = mock_q_out vpe_approx_vnf.resource_helper.resource = mock_resource @@ -797,11 +805,11 @@ self.assertIn('Error starting', str(raised.exception)) - @mock.patch(SSH_HELPER) + @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_terminate(self, ssh): - mock_ssh(ssh) + test_base.mock_ssh(ssh) - vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0) + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock.MagicMock() vpe_approx_vnf._resource_collect_stop = mock.Mock() vpe_approx_vnf.resource_helper = mock.MagicMock()
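One pattern from the proxduration runner tests earlier in this diff deserves a sketch: the real multiprocessing.Queue plus time.sleep() polling is replaced by a plain mock.Mock(), and nondeterministic fields such as the timestamp are matched with mock.ANY, which makes the assertion race-free and removes wall-clock waits. The worker below is an illustrative stand-in for proxduration._worker_process, and the literal timeout of 10 stands in for constants.QUEUE_PUT_TIMEOUT:

    import time
    import unittest

    import mock


    def worker(queue, data, timeout):
        # Stand-in for a runner worker process: push one benchmark record,
        # including a wall-clock timestamp, onto the output queue.
        queue.put({'timestamp': time.time(),
                   'sequence': 1,
                   'data': data}, True, timeout)


    class ExampleQueueAssertTestCase(unittest.TestCase):

        def test_worker_puts_one_record(self):
            queue = mock.Mock()  # no real IPC, no sleep(), no race
            worker(queue, {'my_key': 102}, 10)

            expected = {'timestamp': mock.ANY,  # nondeterministic field
                        'sequence': 1,
                        'data': {'my_key': 102}}
            queue.put.assert_has_calls([mock.call(expected, True, 10)])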