-rw-r--r--  .dockerignore | 7
-rw-r--r--  INFO | 2
-rw-r--r--  cleanup/__init__.py | 13
-rw-r--r--  client/__init__.py | 13
-rw-r--r--  conftest.py | 0
-rw-r--r--  docker/Dockerfile | 7
-rw-r--r--  docs/testing/user/userguide/advanced.rst | 93
-rw-r--r--  docs/testing/user/userguide/conf.py | 2
-rw-r--r--  docs/testing/user/userguide/index.rst | 2
-rw-r--r--  docs/testing/user/userguide/quickstart_docker.rst | 51
-rw-r--r--  docs/testing/user/userguide/server.rst | 3
-rw-r--r--  docs/testing/user/userguide/sriov.rst | 2
-rw-r--r--  nfvbench/__init__.py | 4
-rw-r--r--  nfvbench/cfg.default.yaml | 19
-rw-r--r--  nfvbench/chain_clients.py | 53
-rw-r--r--  nfvbench/chain_managers.py | 10
-rw-r--r--  nfvbench/compute.py | 7
-rw-r--r--  nfvbench/connection.py | 725
-rw-r--r--  nfvbench/nfvbench.py | 3
-rw-r--r--  requirements.txt | 2
-rw-r--r--  setup.py | 3
-rw-r--r--  test-requirements.txt | 2
-rw-r--r--  tox.ini | 1
23 files changed, 148 insertions, 876 deletions
diff --git a/.dockerignore b/.dockerignore
index 642121f..a437a04 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,2 +1,7 @@
requirements-dev.txt
-.gitignore \ No newline at end of file
+.gitignore
+.gitreview
+nfvbenchvm/
+test/
+.tox/
+.cache/ \ No newline at end of file
diff --git a/INFO b/INFO
index 9d89699..8acaf87 100644
--- a/INFO
+++ b/INFO
@@ -21,7 +21,7 @@ Link to TSC approval of the project:
Acknowledgements
The development of NFVbench started in Summer 2016 at Cisco by this small team of dedicated people
-before being open sourced in Summer 2017 to OPNFV following more than 500 commits:
+before being open sourced in Spring 2017 to OPNFV following more than 500 commits:
Jan Balaz (aka Johnny)
Stefano Chiesa Suryanto
Yichen Wang
diff --git a/cleanup/__init__.py b/cleanup/__init__.py
index e69de29..fcaa79b 100644
--- a/cleanup/__init__.py
+++ b/cleanup/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017 Cisco Systems, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License. \ No newline at end of file
diff --git a/client/__init__.py b/client/__init__.py
index e69de29..fcaa79b 100644
--- a/client/__init__.py
+++ b/client/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017 Cisco Systems, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License. \ No newline at end of file
diff --git a/conftest.py b/conftest.py
deleted file mode 100644
index e69de29..0000000
--- a/conftest.py
+++ /dev/null
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1bc9246..9333588 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,9 +1,8 @@
# docker file for creating a container that has nfvbench installed and ready to use
FROM ubuntu:16.04
-COPY . /nfvbench
-
-ENV TREX_VER "v2.27"
+ENV TREX_VER "v2.29"
+ENV NFVBENCH_TAG "1.0.7"
RUN apt-get update && apt-get install -y \
git \
@@ -26,6 +25,8 @@ RUN apt-get update && apt-get install -y \
&& sed -i -e "s/2048 /512 /" -e "s/2048\"/512\"/" /opt/trex/$TREX_VER/trex-cfg \
&& pip install -U pip pbr \
&& pip install -U setuptools \
+ && cd / \
+ && git clone --depth 1 -b $NFVBENCH_TAG https://gerrit.opnfv.org/gerrit/nfvbench \
&& cd /nfvbench && pip install -e . \
&& python ./docker/cleanup_generators.py \
&& rm -rf /nfvbench/.git \
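Since the Dockerfile now clones NFVbench at the pinned NFVBENCH_TAG instead of copying the local tree, the image can be rebuilt from any checkout. A minimal sketch of building it, assuming the file sits at docker/Dockerfile as in this repository (the local image tag used below is illustrative):

.. code-block:: bash

    # build the container image; TREX_VER and NFVBENCH_TAG are baked into the Dockerfile
    docker build -t nfvbench:1.0.7 -f docker/Dockerfile .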
diff --git a/docs/testing/user/userguide/advanced.rst b/docs/testing/user/userguide/advanced.rst
index 6823cb1..00bac53 100644
--- a/docs/testing/user/userguide/advanced.rst
+++ b/docs/testing/user/userguide/advanced.rst
@@ -11,7 +11,7 @@ Below are shown the most common and useful use-cases and explained some fields f
How to change any NFVbench run configuration (CLI)
--------------------------------------------------
-NFVbench always starts with a default configuration which can further be partially refined (overridden) by the user from the CLI or from REST requests.
+NFVbench always starts with a default configuration which can further be refined (overridden) by the user from the CLI or from REST requests.
At first have a look at the default config:
@@ -39,9 +39,9 @@ as seen from inside the container (in this example, we assume the current direct
The same -c option also accepts any valid yaml or json string to override certain parameters without having to create a configuration file.
-NFVbench also provides many configuration options as optional arguments. For example the number of flows can be specified using the --flow-count option.
+NFVbench provides many configuration options as optional arguments. For example the number of flows can be specified using the --flow-count option.
-For example, flow count can be specified in any of 3 ways:
+The flow count option can be specified in any of 3 ways:
- by providing a configuration file that has the flow_count value to use (-c myconfig.yaml and myconfig.yaml contains 'flow_count: 100k')
- by passing that yaml parameter inline (-c "flow_count: 100k") or (-c "{flow_count: 100k}")
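As a concrete sketch of these override mechanisms, the same flow count can be set in any of the following ways (the file name myconfig.yaml is illustrative, and the k-suffix for --flow-count is assumed to follow the same convention as the flow_count config value):

.. code-block:: bash

    # 1. via a configuration file containing 'flow_count: 100k'
    nfvbench -c myconfig.yaml

    # 2. via an inline yaml/json string passed to -c
    nfvbench -c "{flow_count: 100k}"

    # 3. via the dedicated CLI option
    nfvbench --flow-count 100k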
@@ -63,10 +63,9 @@ For example, this will only display the running configuration (without actually
Connectivity and Configuration Check
------------------------------------
-NFVbench allows to test connectivity to devices used with selected flow test, for example PVP.
-It runs the whole test, but without actually sending any traffic or influencing interface counters.
-It is also a good way to check if everything is configured properly in the config file and what versions of components are used.
-
+NFVbench allows the user to test connectivity to the devices used with the selected packet path.
+It runs the whole test, but without actually sending any traffic.
+It is also a good way to check if everything is configured properly in the configuration file and what versions of components are used.
To verify everything works without sending any traffic, use the --no-traffic option:
@@ -83,13 +82,13 @@ Used parameters:
Fixed Rate Run
--------------
-Fixed rate run is the most basic type of NFVbench usage. It is usually used to verify that some amount of packets can pass network components in selected flow.
+Fixed rate run is the most basic type of NFVbench usage. It can be used to measure the drop rate with a fixed transmission rate of packets.
-The first example shows how to run PVP flow (default flow) with multiple different settings:
+This example shows how to run the PVP packet path (which is the default packet path) with multiple different settings:
.. code-block:: bash
- nfvbench -c nfvbench.cfg --no-reset --no-cleanup --rate 100000pps --duration 30 --interval 15 --json results.json
+ nfvbench -c nfvbench.cfg --no-cleanup --rate 100000pps --duration 30 --interval 15 --json results.json
Used parameters:
@@ -100,16 +99,16 @@ Used parameters:
* ``--interval 15`` : stats are checked and shown periodically (in seconds) in this interval when traffic is flowing
* ``--json results.json`` : collected data are stored in this file after run is finished
-.. note:: It is your responsibility to clean up resources if needed when ``--no-cleanup`` parameter is used. You can use the nfvbench_cleanup helper script for that purpose
+.. note:: It is your responsibility to clean up resources if needed when ``--no-cleanup`` parameter is used. You can use the nfvbench_cleanup helper script for that purpose.
-The ``--json`` parameter makes it easy to store NFVbench results. To display collected results in a table form, do:
+The ``--json`` parameter makes it easy to store NFVbench results. The --show-summary (or -ss) option can be used to display the results stored in a json results file in a tabular text format:
.. code-block:: bash
- nfvbench --show-summary results.json # or shortcut -ss results.json
+ nfvbench --show-summary results.json
-Second example aims to show how to specify which supported flow to run:
+This example shows how to specify a different packet path:
.. code-block:: bash
@@ -120,7 +119,7 @@ Used parameters:
* ``-c nfvbench.cfg`` : path to the config file
* ``--rate 1Mbps`` : defines rate of packets sent by traffic generator
* ``--inter-node`` : VMs are created on different compute nodes, works only with PVVP flow
-* ``--service-chain PVVP`` or ``-sc PVVP`` : specifies type of flow to use, default is PVP
+* ``--service-chain PVVP`` or ``-sc PVVP`` : specifies the type of service chain (or packet path) to use
.. note:: When parameter ``--inter-node`` is not used or there aren't enough compute nodes, VMs are on the same compute node.
@@ -135,20 +134,22 @@ Parameter ``--rate`` accepts different types of values:
* bits per second (bps, kbps, Mbps, Gbps), e.g. ``1Gbps``, ``1000bps``
* NDR/PDR (ndr, pdr, ndr_pdr), e.g. ``ndr_pdr``
-The last mentioned value, NDR/PDR, is default one and its usage is covered more below.
-
+NDR/PDR is the default rate when not specified.
NDR and PDR
-----------
-NDR and PDR test is used to determine performance of your setup, maximum packets throughput.
+The NDR and PDR test is used to determine the maximum throughput performance of the system under test
+following guidelines defined in RFC-2544:
-* NDR (No Drop Rate): how many packets can be sent so (almost) none of them are dropped
-* PDR (Partial Drop Rate): how many packets can be sent so drop rate is below given limit
+* NDR (No Drop Rate): maximum packet rate sent without dropping any packet
+* PDR (Partial Drop Rate): maximum packet rate sent while allowing a given maximum drop rate
-Config file contains section where settings for NDR/PDR can be set.
-Increasing number of attempts helps to minimize a chance of traffic hiccups influencing result.
-Other way of increasing precision is to specify longer duration for traffic to run.
+The NDR search can also be relaxed to allow a very small drop rate (lower than the PDR maximum drop rate).
+NFVbench will measure the NDR and PDR values by driving the traffic generator through multiple iterations
+at different transmission rates using a binary search algorithm.
+
+The configuration file contains a section where the NDR/PDR settings can be tuned.
.. code-block:: bash
@@ -166,13 +167,13 @@ Other way of increasing precision is to specify longer duration for traffic to r
# or PDR should be within `load_epsilon` difference than the one calculated.
load_epsilon: 0.1
-Because NDR/PDR is the default ``--rate`` value, it's possible to run NFVbench simply like this:
+Because NDR/PDR is the default ``--rate`` value, it is possible to run NFVbench simply like this:
.. code-block:: bash
nfvbench -c nfvbench.cfg
-Other custom run:
+Other possible run options:
.. code-block:: bash
@@ -188,19 +189,19 @@ Used parameters:
Multichain
----------
-NFVbench allows to run multiple chains at the same time. For example it is possible to run PVP service chain N-times,
+NFVbench allows running multiple chains at the same time. For example, it is possible to stage the PVP service chain N times,
where N can be as much as your compute power can scale. With N = 10, NFVbench will spawn 10 VMs as a part of 10 simultaneous PVP chains.
-Number of chains is specified by ``--service-chain-count`` or ``-scc`` flag, default value is 1.
-For example to run NFVbench with 3 PVP chains use command:
+The number of chains is specified by the ``--service-chain-count`` (or ``-scc``) flag and defaults to 1.
+For example to run NFVbench with 3 PVP chains:
.. code-block:: bash
nfvbench -c nfvbench.cfg --rate 10000pps -scc 3
-It is not necessary to specify service chain because PVP is set as default. PVP service chains will have 3 VMs in 3 chains with this configuration.
+It is not necessary to specify the service chain type (-sc) because PVP is set as default. The PVP service chains will have 3 VMs in 3 chains with this configuration.
If ``-sc PVVP`` is specified instead, there would be 6 VMs in 3 chains as this service chain has 2 VMs per chain.
-Both **single run** or **NDR/PDR** can be run as multichain. Running multichain is a scenario closer to a real life situation than just simple run.
+Both **single run** and **NDR/PDR** can be run as multichain. Running multichain is a scenario closer to a real-life situation than runs with a single chain.
External Chain
@@ -226,31 +227,26 @@ To run NFVbench on such external service chains:
.. image:: images/extchain-config.svg
-The L3 router function must be enabled in the VNF and configured to:
+L3 routing must be enabled in the VNF and configured to:
- reply to ARP requests to its public IP addresses on both left and right networks
- route packets from each set of remote devices toward the appropriate dest gateway IP in the traffic generator using 2 static routes (as illustrated in the diagram)
Upon start, NFVbench will:
- first retrieve the properties of the left and right networks using Neutron APIs,
-- extract the underlying network ID (either VLAN ID or VNI if VxLAN is used),
-- then program the TOR to stitch the 2 interfaces from the traffic generator into each end of the service chain,
-- then generate and measure traffic.
+- extract the underlying network ID (typically VLAN segmentation ID),
+- generate packets with the proper VLAN ID and measure traffic.
Note that in the case of multiple chains, all chains end interfaces must be connected to the same two left and right networks.
The traffic will be load balanced across the corresponding gateway IP of these external service chains.
-.. note:: By default, interfaces configuration (TOR, VTS, etc.) will be run by NFVbench but these can be skipped by using ``--no-int-config`` flag.
-
Multiflow
---------
NFVbench always generates L3 packets from the traffic generator but allows the user to specify how many flows to generate.
-A flow is identified by a unique src/dest MAC IP and port tuple that is sent by the traffic generator. Note that from a vswitch point of view, the
-number of flows seen will be higher as it will be at least 4 times the number of flows sent by the traffic generator
-(add reverse direction of vswitch to traffic generator, add flow to VM and flow from VM).
-
+A flow is identified by a unique src/dest MAC IP and port tuple that is sent by the traffic generator. Flows are
+generated by ranging the IP addresses but using a small fixed number of MAC addresses.
The number of flows will be spread roughly even between chains when more than 1 chain is being tested.
For example, for 11 flows and 3 chains, the number of flows for each chain will be 3, 4, and 4 respectively.
@@ -262,6 +258,9 @@ To run NFVbench with 3 chains and 100 flows, use the following command:
nfvbench -c nfvbench.cfg --rate 10000pps -scc 3 -fc 100
+Note that from a vswitch point of view, the
+number of flows seen will be higher as it will be at least 4 times the number of flows sent by the traffic generator
+(add flow to VM and flow from VM).
IP addresses generated can be controlled with the following NFVbench configuration options:
@@ -286,12 +285,10 @@ The corresponding ``step`` is used for ranging the IP addresses from the `ip_add
0.0.0.1 is the default step for all IP ranges. In ``ip_addrs``, 'random' can be configured which tells NFVBench to generate random src/dst IP pairs in the traffic stream.
-Traffic Config via CLI
-----------------------
-
-While traffic configuration can modified using the config file, it became a hassle to have to change the config file everytime you need to change traffic config.
+Traffic Configuration via CLI
+-----------------------------
-Traffic config can be overridden with the CLI options.
+While traffic configuration can be modified using the configuration file, it can be inconvenient to have to change the configuration file every time you need to change a traffic configuration option. Traffic configuration options can be overridden with a few CLI options.
Here is an example of configuring traffic via CLI:
@@ -299,7 +296,7 @@ Here is an example of configuring traffic via CLI:
nfvbench --rate 10kpps --service-chain-count 2 -fs 64 -fs IMIX -fs 1518 --unidir
-This command will run NFVbench with two streams with unidirectional flow for three packet sizes 64B, IMIX, and 1518B.
+This command will run NFVbench with a unidirectional flow for three packet sizes 64B, IMIX, and 1518B.
Used parameters:
@@ -313,8 +310,8 @@ MAC Addresses
-------------
NFVbench will discover the MAC addresses to use for generated frames using:
-- either OpenStack discovery (find the MAC of an existing VM) if the loopback VM is configured to run L2 forwarding
-- or using dynamic ARP discovery (find MAC from IP) if the loopback VM is configured to run L3 routing or in the case of external chains.
+- either OpenStack discovery (find the MAC of an existing VM) in the case of PVP and PVVP service chains
+- or using dynamic ARP discovery (find MAC from IP) in the case of external chains.
Cleanup Script
--------------
diff --git a/docs/testing/user/userguide/conf.py b/docs/testing/user/userguide/conf.py
index 020533c..0aae169 100644
--- a/docs/testing/user/userguide/conf.py
+++ b/docs/testing/user/userguide/conf.py
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+# Copyright 2017 Cisco Systems, Inc. All rights reserved.
#
# NFVBench documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 29 14:25:18 2016.
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index fa9d7d0..b6ae833 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -17,7 +17,7 @@ It is designed to be easy to install and easy to use by non experts (no need to
Table of Content
----------------
.. toctree::
- :maxdepth: 2
+ :maxdepth: 3
readme
installation
diff --git a/docs/testing/user/userguide/quickstart_docker.rst b/docs/testing/user/userguide/quickstart_docker.rst
index 2c9f762..98d4d25 100644
--- a/docs/testing/user/userguide/quickstart_docker.rst
+++ b/docs/testing/user/userguide/quickstart_docker.rst
@@ -154,64 +154,23 @@ Edit the nfvbench.cfg file to only keep those properties that need to be modifie
6. Run NFVbench
---------------
-To do a single run at 5000pps bi-directional using the PVP packet path:
+To do a single run at 10,000pps bi-directional (or 5kpps in each direction) using the PVP packet path:
.. code-block:: bash
- nfvbench -c /tmp/nfvbench/my_nfvbench.cfg --rate 5kpps
+ nfvbench -c /tmp/nfvbench/my_nfvbench.cfg --rate 10kpps
NFVbench options used:
* ``-c /tmp/nfvbench/my_nfvbench.cfg`` : specify the config file to use (this must reflect the file path from inside the container)
-* ``--rate 5kpps`` : specify rate of packets for test using the kpps unit (thousands of packets per second)
+* ``--rate 10kpps`` : specify the total rate of packets for the test across both directions, using the kpps unit (thousands of packets per second)
This should produce a result similar to this (a simple run with the above options should take less than 5 minutes):
.. code-block:: none
- ========== nfvbench Summary ==========
- Date: 2016-10-05 21:43:30
- nfvbench version 0.0.1.dev128
- Mercury version: 5002
- Benchmarks:
- > Networks:
- > N9K version: {'10.28.108.249': {'BIOS': '07.34', 'NXOS': '7.0(3)I2(2b)'}, '10.28.108.248': {'BIOS': '07.34', 'NXOS': '7.0(3)I2(2b)'}}
- Traffic generator profile: trex-c45
- Traffic generator tool: TRex
- Traffic generator API version: {u'build_date': u'Aug 24 2016', u'version': u'v2.08', u'built_by': u'hhaim', u'build_time': u'16:32:13'}
- Flows:
- > PVP:
- VPP version: {u'sjc04-pod3-compute-6': 'v16.06-rc1~27-gd175728'}
- > Bidirectional: False
- Profile: traffic_profile_64B
-
- +-----------------+-------------+----------------------+----------------------+----------------------+
- | L2 Frame Size | Drop Rate | Avg Latency (usec) | Min Latency (usec) | Max Latency (usec) |
- +=================+=============+======================+======================+======================+
- | 64 | 0.0000% | 22.1885 | 10 | 503 |
- +-----------------+-------------+----------------------+----------------------+----------------------+
-
-
- > L2 frame size: 64
- Flow analysis duration: 70.0843 seconds
-
- Run Config:
-
- +-------------+------------------+--------------+-----------+
- | Direction | Duration (sec) | Rate | Rate |
- +=============+==================+==============+===========+
- | Forward | 60 | 1.0080 Mbps | 1,500 pps |
- +-------------+------------------+--------------+-----------+
- | Reverse | 60 | 672.0000 bps | 1 pps |
- +-------------+------------------+--------------+-----------+
-
- +----------------------+----------+-----------------+---------------+---------------+-----------------+---------------+---------------+
- | Interface | Device | Packets (fwd) | Drops (fwd) | Drop% (fwd) | Packets (rev) | Drops (rev) | Drop% (rev) |
- +======================+==========+=================+===============+===============+=================+===============+===============+
- | traffic-generator | trex | 90,063 | | | 61 | 0 | - |
- +----------------------+----------+-----------------+---------------+---------------+-----------------+---------------+---------------+
- | traffic-generator | trex | 90,063 | 0 | - | 61 | | |
- +----------------------+----------+-----------------+---------------+---------------+-----------------+---------------+---------------+
+ [TBP]
+
7. Terminating the NFVbench container
-------------------------------------
diff --git a/docs/testing/user/userguide/server.rst b/docs/testing/user/userguide/server.rst
index ebdd828..f1ab618 100644
--- a/docs/testing/user/userguide/server.rst
+++ b/docs/testing/user/userguide/server.rst
@@ -170,7 +170,6 @@ The entire default configuration can be viewed using the --show-json-config opti
"flow_count": 1,
"generic_poll_sec": 2,
"generic_retry_count": 100,
- "image_name": "nfvbenchvm",
"inter_node": false,
"internal_networks": {
"left": {
@@ -304,7 +303,7 @@ The entire default configuration can be viewed using the --show-json-config opti
],
"unidir_reverse_traffic_pps": 1,
"vlan_tagging": true,
- "vm_image_file": "file://172.29.172.152/downloads/nfvbench/nfvbenchvm-latest.qcow2",
+ "vm_image_file": "/nfvbench/nfvbenchvm-0.3.qcow2",
"vts_ncs": {
"host": null,
"password": "secret",
diff --git a/docs/testing/user/userguide/sriov.rst b/docs/testing/user/userguide/sriov.rst
index 2efb495..4898e85 100644
--- a/docs/testing/user/userguide/sriov.rst
+++ b/docs/testing/user/userguide/sriov.rst
@@ -6,7 +6,7 @@
Testing SR-IOV
==============
-NFVbench supports SR-IOV with the PVP and PVVP packet flows. SR-IOV support is not applicable for external chains since the networks have to be setup externally (and can themselves be pre-set to use SR-IOV or not).
+NFVbench supports SR-IOV with the PVP packet flow (PVVP is not supported). SR-IOV support is not applicable for external chains since the networks have to be setup externally (and can themselves be pre-set to use SR-IOV or not).
Pre-requisites
--------------
diff --git a/nfvbench/__init__.py b/nfvbench/__init__.py
index 6e88400..5fb4014 100644
--- a/nfvbench/__init__.py
+++ b/nfvbench/__init__.py
@@ -1,5 +1,5 @@
-# -*- coding: utf-8 -*-
-
+# Copyright 2017 Cisco Systems, Inc. All rights reserved.
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
index 534b3c7..f36cbea 100644
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -19,21 +19,16 @@
# option, e.g. "--interval"
-# Name of the image to use for launching the loopback VMs. This name must be
-# the exact same name used in OpenStack (as shown from 'nova image-list')
-# Can be overridden by --image or -i
-image_name: 'nfvbenchvm'
# Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
vm_forwarder: testpmd
-# NFVbench can automatically upload a VM image if the image named by
-# image_name is missing, for that you need to specify a file location where
-# the image can be retrieved
-#
-# To upload the image as a file, download it to preferred location
-# and prepend it with file:// like in this example:
-# file://<location of the image>
-# NFVbench (the image must have the same name as defined in image_name above).
+# By default (empty) NFVBench will try to locate a VM image file
+# from the package root directory named "nfvbenchvm-<version>.qcow2" and
+# upload that file. The image name will be "nfvbenchvm-<version>"
+# This can be overridden by specifying here a pathname of a file
+# that follows the same naming convention.
+# In most cases, this field should be left empty as the packaging should
+# include the proper VM image file
vm_image_file:
# Name of the flavor to use for the loopback VMs
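A hedged example of overriding the empty vm_image_file default from the command line, using the inline configuration override described in the advanced guide (the path below is illustrative; the file name must keep the nfvbenchvm-<version>.qcow2 convention):

.. code-block:: bash

    # point NFVbench at an explicit qcow2 image and verify the setup without sending traffic
    nfvbench -c "{vm_image_file: /opt/images/nfvbenchvm-0.3.qcow2}" --no-traffic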
diff --git a/nfvbench/chain_clients.py b/nfvbench/chain_clients.py
index bf51552..c6df08a 100644
--- a/nfvbench/chain_clients.py
+++ b/nfvbench/chain_clients.py
@@ -20,6 +20,7 @@ from log import LOG
from neutronclient.neutron import client as neutronclient
from novaclient.client import Client
import os
+import re
import time
@@ -35,6 +36,7 @@ class BasicStageClient(object):
def __init__(self, config, cred):
self.comp = None
self.image_instance = None
+ self.image_name = None
self.config = config
self.cred = cred
self.nets = []
@@ -229,25 +231,45 @@ class BasicStageClient(object):
return server
def _setup_resources(self):
- if not self.image_instance:
- self.image_instance = self.comp.find_image(self.config.image_name)
- if self.image_instance is None:
+ # To avoid reuploading image in server mode, check whether image_name is set or not
+ if self.image_name:
+ self.image_instance = self.comp.find_image(self.image_name)
+ if self.image_instance:
+ LOG.info("Reusing image %s" % self.image_name)
+ else:
+ image_name_search_pattern = '(nfvbenchvm-\d+(\.\d+)*).qcow2'
if self.config.vm_image_file:
- LOG.info('%s: image for VM not found, trying to upload it ...'
- % self.config.image_name)
- res = self.comp.upload_image_via_url(self.config.image_name,
+ match = re.search(image_name_search_pattern, self.config.vm_image_file)
+ if match:
+ self.image_name = match.group(1)
+ LOG.info('Using provided VM image file %s' % self.config.vm_image_file)
+ else:
+ raise StageClientException('Provided VM image file name %s must start with '
+ '"nfvbenchvm-<version>"' % self.config.vm_image_file)
+ else:
+ pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ for f in os.listdir(pkg_root):
+ if re.search(image_name_search_pattern, f):
+ self.config.vm_image_file = pkg_root + '/' + f
+ self.image_name = f.replace('.qcow2', '')
+ LOG.info('Found built-in VM image file %s' % f)
+ break
+ else:
+ raise StageClientException('Cannot find any built-in VM image file.')
+ if self.image_name:
+ self.image_instance = self.comp.find_image(self.image_name)
+ if not self.image_instance:
+ LOG.info('Uploading %s'
+ % self.image_name)
+ res = self.comp.upload_image_via_url(self.image_name,
self.config.vm_image_file)
if not res:
raise StageClientException('Error uploading image %s from %s. ABORTING.'
- % (self.config.image_name,
+ % (self.image_name,
self.config.vm_image_file))
- self.image_instance = self.comp.find_image(self.config.image_name)
- else:
- raise StageClientException('%s: image to launch VM not found. ABORTING.'
- % self.config.image_name)
-
- LOG.info('Found image %s to launch VM' % self.config.image_name)
+ LOG.info('Image %s successfully uploaded.' % self.image_name)
+ self.image_instance = self.comp.find_image(self.image_name)
self.__setup_flavor()
@@ -381,7 +403,7 @@ class BasicStageClient(object):
"""
vlans = []
for net in self.nets:
- assert(net['provider:network_type'] == 'vlan')
+ assert (net['provider:network_type'] == 'vlan')
vlans.append(net['provider:segmentation_id'])
return vlans
@@ -419,7 +441,6 @@ class BasicStageClient(object):
class EXTStageClient(BasicStageClient):
-
def __init__(self, config, cred):
super(EXTStageClient, self).__init__(config, cred)
@@ -436,7 +457,6 @@ class EXTStageClient(BasicStageClient):
class PVPStageClient(BasicStageClient):
-
def __init__(self, config, cred):
super(PVPStageClient, self).__init__(config, cred)
@@ -480,7 +500,6 @@ class PVPStageClient(BasicStageClient):
class PVVPStageClient(BasicStageClient):
-
def __init__(self, config, cred):
super(PVVPStageClient, self).__init__(config, cred)
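The new discovery logic above derives the Glance image name from the qcow2 file name using the nfvbenchvm-<version>.qcow2 pattern. A quick way to check a candidate file name against a slightly stricter, anchored equivalent of that pattern (the file name below is illustrative):

.. code-block:: bash

    # prints the file name only if it follows the expected naming convention
    echo "nfvbenchvm-0.3.qcow2" | grep -E '^nfvbenchvm-[0-9]+(\.[0-9]+)*\.qcow2$'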
diff --git a/nfvbench/chain_managers.py b/nfvbench/chain_managers.py
index fe3a2d4..033eb7a 100644
--- a/nfvbench/chain_managers.py
+++ b/nfvbench/chain_managers.py
@@ -85,8 +85,14 @@ class StatsManager(object):
WORKER_CLASS = self.factory.get_chain_worker(self.specs.openstack.encaps,
self.config.service_chain)
self.worker = WORKER_CLASS(self.config, self.clients, self.specs)
- self.worker.set_vlans(self.vlans)
- self._config_interfaces()
+ try:
+ self.worker.set_vlans(self.vlans)
+ self._config_interfaces()
+ except Exception as exc:
+ # since the worker is up and running, we need to close it
+ # in case of exception
+ self.close()
+ raise exc
def _get_data(self):
return self.worker.get_data()
diff --git a/nfvbench/compute.py b/nfvbench/compute.py
index 681a852..5806164 100644
--- a/nfvbench/compute.py
+++ b/nfvbench/compute.py
@@ -51,9 +51,7 @@ class Compute(object):
retry = 0
try:
# check image is file/url based.
- file_prefix = "file://"
- image_location = image_file.split(file_prefix)[1]
- with open(image_location) as f_image:
+ with open(image_file) as f_image:
img = self.glance_client.images.create(name=str(final_image_name),
disk_format="qcow2",
container_format="bare",
@@ -82,8 +80,7 @@ class Compute(object):
return False
except Exception:
LOG.error(traceback.format_exc())
- LOG.error("Failed while uploading the image, please make sure the "
- "cloud under test has the access to file: %s.", image_file)
+ LOG.error("Failed to upload image %s.", image_file)
return False
return True
diff --git a/nfvbench/connection.py b/nfvbench/connection.py
deleted file mode 100644
index 0ef994f..0000000
--- a/nfvbench/connection.py
+++ /dev/null
@@ -1,725 +0,0 @@
-# Copyright 2013: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""High level ssh library.
-Usage examples:
-Execute command and get output:
- ssh = sshclient.SSH('root', 'example.com', port=33)
- status, stdout, stderr = ssh.execute('ps ax')
- if status:
- raise Exception('Command failed with non-zero status.')
- print stdout.splitlines()
-Execute command with huge output:
- class PseudoFile(object):
- def write(chunk):
- if 'error' in chunk:
- email_admin(chunk)
- ssh = sshclient.SSH('root', 'example.com')
- ssh.run('tail -f /var/log/syslog', stdout=PseudoFile(), timeout=False)
-Execute local script on remote side:
- ssh = sshclient.SSH('user', 'example.com')
- status, out, err = ssh.execute('/bin/sh -s arg1 arg2',
- stdin=open('~/myscript.sh', 'r'))
-Upload file:
- ssh = sshclient.SSH('user', 'example.com')
- ssh.run('cat > ~/upload/file.gz', stdin=open('/store/file.gz', 'rb'))
-Eventlet:
- eventlet.monkey_patch(select=True, time=True)
- or
- eventlet.monkey_patch()
- or
- sshclient = eventlet.import_patched("opentstack.common.sshclient")
-"""
-
-import re
-import select
-import shlex
-import socket
-import StringIO
-import subprocess
-import sys
-import threading
-import time
-
-from log import LOG
-import paramiko
-
-# from rally.openstack.common.gettextutils import _
-
-
-class ConnectionError(Exception):
- pass
-
-
-class Connection(object):
-
- '''
- A base connection class. Not intended to be constructed.
- '''
-
- def __init__(self):
- self.distro_id = None
- self.distro_id_like = None
- self.distro_version = None
- self.__get_distro()
-
- def close(self):
- pass
-
- def execute(self, cmd, stdin=None, timeout=3600):
- pass
-
- def __extract_property(self, name, input_str):
- expr = name + r'="?([\w\.]*)"?'
- match = re.search(expr, input_str)
- if match:
- return match.group(1)
- return 'Unknown'
-
- # Get the linux distro
- def __get_distro(self):
- '''cat /etc/*-release | grep ID
- Ubuntu:
- DISTRIB_ID=Ubuntu
- ID=ubuntu
- ID_LIKE=debian
- VERSION_ID="14.04"
- RHEL:
- ID="rhel"
- ID_LIKE="fedora"
- VERSION_ID="7.0"
- '''
- distro_cmd = "grep ID /etc/*-release"
- (status, distro_out, _) = self.execute(distro_cmd)
- if status:
- distro_out = ''
- self.distro_id = self.__extract_property('ID', distro_out)
- self.distro_id_like = self.__extract_property('ID_LIKE', distro_out)
- self.distro_version = self.__extract_property('VERSION_ID', distro_out)
-
- def pidof(self, proc_name):
- '''
- Return a list containing the pids of all processes of a given name
- the list is empty if there is no pid
- '''
- # the path update is necessary for RHEL
- cmd = "PATH=$PATH:/usr/sbin pidof " + proc_name
- (status, cmd_output, _) = self.execute(cmd)
- if status:
- return []
- cmd_output = cmd_output.strip()
- result = cmd_output.split()
- return result
-
- # kill pids in the given list of pids
- def kill_proc(self, pid_list):
- cmd = "kill -9 " + ' '.join(pid_list)
- self.execute(cmd)
-
- # check stats for a given path
- def stat(self, path):
- (status, cmd_output, _) = self.execute('stat ' + path)
- if status:
- return None
- return cmd_output
-
- def ping_check(self, target_ip, ping_count=2, pass_threshold=80):
- '''helper function to ping from one host to an IP address,
- for a given count and pass_threshold;
- Steps:
- ssh to the host and then ping to the target IP
- then match the output and verify that the loss% is
- less than the pass_threshold%
- Return 1 if the criteria passes
- Return 0, if it fails
- '''
- cmd = "ping -c " + str(ping_count) + " " + str(target_ip)
- (_, cmd_output, _) = self.execute(cmd)
-
- match = re.search(r'(\d*)% packet loss', cmd_output)
- pkt_loss = match.group(1)
- if int(pkt_loss) < int(pass_threshold):
- return 1
- else:
- LOG.error('Ping to %s failed: %s', target_ip, cmd_output)
- return 0
-
- def read_remote_file(self, from_path):
- '''
- Read a remote file and save it to a buffer.
- '''
- cmd = "cat " + from_path
- (status, cmd_output, _) = self.execute(cmd)
- if status:
- return None
- return cmd_output
-
- def get_host_os_version(self):
- '''
- Identify the host distribution/relase.
- '''
- os_release_file = "/etc/os-release"
- sys_release_file = "/etc/system-release"
- name = ""
- version = ""
-
- if self.stat(os_release_file):
- data = self.read_remote_file(os_release_file)
- if data is None:
- LOG.error("Failed to read file %s", os_release_file)
- return None
-
- for line in data.splitlines():
- mobj = re.match(r'NAME=(.*)', line)
- if mobj:
- name = mobj.group(1).strip("\"")
-
- mobj = re.match(r'VERSION_ID=(.*)', line)
- if mobj:
- version = mobj.group(1).strip("\"")
-
- os_name = name + " " + version
- return os_name
-
- if self.stat(sys_release_file):
- data = self.read_remote_file(sys_release_file)
- if data is None:
- LOG.error("Failed to read file %s", sys_release_file)
- return None
-
- for line in data.splitlines():
- mobj = re.match(r'Red Hat.*', line)
- if mobj:
- return mobj.group(0)
-
- return None
-
- def check_rpm_package_installed(self, rpm_pkg):
- '''
- Given a host and a package name, check if it is installed on the
- system.
- '''
- check_pkg_cmd = "rpm -qa | grep " + rpm_pkg
-
- (status, cmd_output, _) = self.execute(check_pkg_cmd)
- if status:
- return None
-
- pkg_pattern = ".*" + rpm_pkg + ".*"
- rpm_pattern = re.compile(pkg_pattern, re.IGNORECASE)
-
- for line in cmd_output.splitlines():
- mobj = rpm_pattern.match(line)
- if mobj:
- return mobj.group(0)
-
- LOG.info("%s pkg installed ", rpm_pkg)
-
- return None
-
- def get_openstack_release(self, ver_str):
- '''
- Get the release series name from the package version
- Refer to here for release tables:
- https://wiki.openstack.org/wiki/Releases
- '''
- ver_table = {"2015.1": "Kilo",
- "2014.2": "Juno",
- "2014.1": "Icehouse",
- "2013.2": "Havana",
- "2013.1": "Grizzly",
- "2012.2": "Folsom",
- "2012.1": "Essex",
- "2011.3": "Diablo",
- "2011.2": "Cactus",
- "2011.1": "Bexar",
- "2010.1": "Austin"}
-
- ver_prefix = re.search(r"20\d\d\.\d", ver_str).group(0)
- if ver_prefix in ver_table:
- return ver_table[ver_prefix]
- else:
- return "Unknown"
-
- def check_openstack_version(self):
- '''
- Identify the openstack version running on the controller.
- '''
- nova_cmd = "nova-manage --version"
- (status, _, err_output) = self.execute(nova_cmd)
-
- if status:
- return "Unknown"
-
- ver_str = err_output.strip()
- release_str = self.get_openstack_release(err_output)
- return release_str + " (" + ver_str + ")"
-
- def get_cpu_info(self):
- '''
- Get the CPU info of the controller.
- Note: Here we are assuming the controller node has the exact
- hardware as the compute nodes.
- '''
-
- cmd = 'cat /proc/cpuinfo | grep -m1 "model name"'
- (status, std_output, _) = self.execute(cmd)
- if status:
- return "Unknown"
- model_name = re.search(r":\s(.*)", std_output).group(1)
-
- cmd = 'cat /proc/cpuinfo | grep "model name" | wc -l'
- (status, std_output, _) = self.execute(cmd)
- if status:
- return "Unknown"
- cores = std_output.strip()
-
- return (cores + " * " + model_name)
-
- def get_nic_name(self, agent_type, encap, internal_iface_dict):
- '''
- Get the NIC info of the controller.
- Note: Here we are assuming the controller node has the exact
- hardware as the compute nodes.
- '''
-
- # The internal_ifac_dict is a dictionary contains the mapping between
- # hostname and the internal interface name like below:
- # {u'hh23-4': u'eth1', u'hh23-5': u'eth1', u'hh23-6': u'eth1'}
-
- cmd = "hostname"
- (status, std_output, _) = self.execute(cmd)
- if status:
- return "Unknown"
- hostname = std_output.strip()
-
- if hostname in internal_iface_dict:
- iface = internal_iface_dict[hostname]
- else:
- return "Unknown"
-
- # Figure out which interface is for internal traffic
- if 'Linux bridge' in agent_type:
- ifname = iface
- elif 'Open vSwitch' in agent_type:
- if encap == 'vlan':
- # [root@hh23-10 ~]# ovs-vsctl list-ports br-inst
- # eth1
- # phy-br-inst
- cmd = 'ovs-vsctl list-ports ' + \
- iface + ' | grep -E "^[^phy].*"'
- (status, std_output, _) = self.execute(cmd)
- if status:
- return "Unknown"
- ifname = std_output.strip()
- elif encap == 'vxlan' or encap == 'gre':
- # This is complicated. We need to first get the local IP address on
- # br-tun, then do a reverse lookup to get the physical interface.
- #
- # [root@hh23-4 ~]# ip addr show to "23.23.2.14"
- # 3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
- # inet 23.23.2.14/24 brd 23.23.2.255 scope global eth1
- # valid_lft forever preferred_lft forever
- cmd = "ip addr show to " + iface + " | awk -F: '{print $2}'"
- (status, std_output, _) = self.execute(cmd)
- if status:
- return "Unknown"
- ifname = std_output.strip()
- else:
- return "Unknown"
-
- cmd = 'ethtool -i ' + ifname + ' | grep bus-info'
- (status, std_output, _) = self.execute(cmd)
- if status:
- return "Unknown"
- bus_info = re.search(r":\s(.*)", std_output).group(1)
-
- cmd = 'lspci -s ' + bus_info
- (status, std_output, _) = self.execute(cmd)
- if status:
- return "Unknown"
- nic_name = re.search(
- r"Ethernet controller:\s(.*)",
- std_output).group(1)
-
- return (nic_name)
-
- def get_l2agent_version(self, agent_type):
- '''
- Get the L2 agent version of the controller.
- Note: Here we are assuming the controller node has the exact
- hardware as the compute nodes.
- '''
- if 'Linux bridge' in agent_type:
- cmd = "brctl --version | awk -F',' '{print $2}'"
- ver_string = "Linux Bridge "
- elif 'Open vSwitch' in agent_type:
- cmd = "ovs-vsctl --version | awk -F')' '{print $2}'"
- ver_string = "OVS "
- else:
- return "Unknown"
-
- (status, std_output, _) = self.execute(cmd)
- if status:
- return "Unknown"
-
- return ver_string + std_output.strip()
-
-
-class SSHError(Exception):
- pass
-
-
-class SSHTimeout(SSHError):
- pass
-
-# Check IPv4 address syntax - not completely fool proof but will catch
-# some invalid formats
-
-
-def is_ipv4(address):
- try:
- socket.inet_aton(address)
- except socket.error:
- return False
- return True
-
-
-class SSHAccess(object):
-
- '''
- A class to contain all the information needed to access a host
- (native or virtual) using SSH
- '''
-
- def __init__(self, arg_value=None):
- '''
- decode user@host[:pwd]
- 'hugo@1.1.1.1:secret' -> ('hugo', '1.1.1.1', 'secret', None)
- 'huggy@2.2.2.2' -> ('huggy', '2.2.2.2', None, None)
- None ->(None, None, None, None)
- Examples of fatal errors (will call exit):
- 'hutch@q.1.1.1' (invalid IP)
- '@3.3.3.3' (missing username)
- 'hiro@' or 'buggy' (missing host IP)
- The error field will be None in case of success or will
- contain a string describing the error
- '''
- self.username = None
- self.host = None
- self.password = None
- # name of the file that contains the private key
- self.private_key_file = None
- # this is the private key itself (a long string starting with
- # -----BEGIN RSA PRIVATE KEY-----
- # used when the private key is not saved in any file
- self.private_key = None
- self.public_key_file = None
- self.port = 22
- self.error = None
-
- if not arg_value:
- return
- match = re.search(r'^([^@]+)@([0-9\.]+):?(.*)$', arg_value)
- if not match:
- self.error = 'Invalid argument: ' + arg_value
- return
- if not is_ipv4(match.group(2)):
- self.error = 'Invalid IPv4 address ' + match.group(2)
- return
- (self.username, self.host, self.password) = match.groups()
-
- def copy_from(self, ssh_access):
- self.username = ssh_access.username
- self.host = ssh_access.host
- self.port = ssh_access.port
- self.password = ssh_access.password
- self.private_key = ssh_access.private_key
- self.public_key_file = ssh_access.public_key_file
- self.private_key_file = ssh_access.private_key_file
-
-
-class SSH(Connection):
-
- """Represent ssh connection."""
-
- def __init__(self, ssh_access,
- connect_timeout=60,
- connect_retry_count=30,
- connect_retry_wait_sec=2):
- """Initialize SSH client.
- :param user: ssh username
- :param host: hostname or ip address of remote ssh server
- :param port: remote ssh port
- :param pkey: RSA or DSS private key string or file object
- :param key_filename: private key filename
- :param password: password
- :param connect_timeout: timeout when connecting ssh
- :param connect_retry_count: how many times to retry connecting
- :param connect_retry_wait_sec: seconds to wait between retries
- """
-
- self.ssh_access = ssh_access
- if ssh_access.private_key:
- self.pkey = self._get_pkey(ssh_access.private_key)
- else:
- self.pkey = None
- self._client = False
- self.connect_timeout = connect_timeout
- self.connect_retry_count = connect_retry_count
- self.connect_retry_wait_sec = connect_retry_wait_sec
- super(SSH, self).__init__()
-
- def _get_pkey(self, key):
- '''Get the binary form of the private key
- from the text form
- '''
- if isinstance(key, basestring):
- key = StringIO.StringIO(key)
- errors = []
- for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
- try:
- return key_class.from_private_key(key)
- except paramiko.SSHException as exc:
- errors.append(exc)
- raise SSHError('Invalid pkey: %s' % (errors))
-
- def _is_active(self):
- if self._client:
- try:
- transport = self._client.get_transport()
- session = transport.open_session()
- session.close()
- return True
- except Exception:
- return False
- else:
- return False
-
- def _get_client(self, force=False):
- if not force and self._is_active():
- return self._client
- if self._client:
- LOG.info('Re-establishing ssh connection with %s' % (self.ssh_access.host))
- self._client.close()
- self._client = paramiko.SSHClient()
- self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- for _ in range(self.connect_retry_count):
- try:
- self._client.connect(self.ssh_access.host,
- username=self.ssh_access.username,
- port=self.ssh_access.port,
- pkey=self.pkey,
- key_filename=self.ssh_access.private_key_file,
- password=self.ssh_access.password,
- timeout=self.connect_timeout)
- self._client.get_transport().set_keepalive(5)
- return self._client
- except (paramiko.AuthenticationException,
- paramiko.BadHostKeyException,
- paramiko.SSHException,
- socket.error,
- Exception):
- time.sleep(self.connect_retry_wait_sec)
-
- self._client = None
- msg = '[%s] SSH Connection failed after %s attempts' % (self.ssh_access.host,
- self.connect_retry_count)
- raise SSHError(msg)
-
- def _get_session(self):
- client = self._get_client()
- for _ in range(self.connect_retry_count):
- try:
- transport = client.get_transport()
- session = transport.open_session()
- return session
- except Exception:
- client = self._get_client(force=True)
- return None
-
- def close(self):
- super(SSH, self).close()
- if self._client:
- self._client.close()
- self._client = False
-
- def run(self, cmd, stdin=None, stdout=None, stderr=None,
- raise_on_error=True, timeout=3600, sudo=False):
- """Execute specified command on the server.
- :param cmd: Command to be executed.
- :param stdin: Open file or string to pass to stdin.
- :param stdout: Open file to connect to stdout.
- :param stderr: Open file to connect to stderr.
- :param raise_on_error: If False then exit code will be return. If True
- then exception will be raized if non-zero code.
- :param timeout: Timeout in seconds for command execution.
- Default 1 hour. No timeout if set to 0.
- :param sudo: Executes command as sudo with default password
- """
-
- if isinstance(stdin, basestring):
- stdin = StringIO.StringIO(stdin)
-
- return self._run(cmd, stdin=stdin, stdout=stdout,
- stderr=stderr, raise_on_error=raise_on_error,
- timeout=timeout, sudo=sudo)
-
- def _run(self, cmd, stdin=None, stdout=None, stderr=None,
- raise_on_error=True, timeout=3600, sudo=False):
-
- session = self._get_session()
-
- if session is None:
- raise SSHError('Unable to open session to ssh connection')
-
- if sudo:
- cmd = "echo " + self.ssh_access.password + " | sudo -S -p '' " + cmd
- session.get_pty()
-
- session.exec_command(cmd)
- start_time = time.time()
-
- data_to_send = ''
- stderr_data = None
-
- # If we have data to be sent to stdin then `select' should also
- # check for stdin availability.
- if stdin and not stdin.closed:
- writes = [session]
- else:
- writes = []
-
- while True:
- # Block until data can be read/write.
- select.select([session], writes, [session], 1)
-
- if session.recv_ready():
- data = session.recv(4096)
- if stdout is not None:
- stdout.write(data)
- continue
-
- if session.recv_stderr_ready():
- stderr_data = session.recv_stderr(4096)
- if stderr is not None:
- stderr.write(stderr_data)
- continue
-
- if session.send_ready():
- if stdin is not None and not stdin.closed:
- if not data_to_send:
- data_to_send = stdin.read(4096)
- if not data_to_send:
- stdin.close()
- session.shutdown_write()
- writes = []
- continue
- sent_bytes = session.send(data_to_send)
- data_to_send = data_to_send[sent_bytes:]
-
- if session.exit_status_ready():
- break
-
- if timeout and (time.time() - timeout) > start_time:
- args = {'cmd': cmd, 'host': self.ssh_access.host}
- raise SSHTimeout(('Timeout executing command '
- '"%(cmd)s" on host %(host)s') % args)
- # if e:
- # raise SSHError('Socket error.')
-
- exit_status = session.recv_exit_status()
- if 0 != exit_status and raise_on_error:
- fmt = ('Command "%(cmd)s" failed with exit_status %(status)d.')
- details = fmt % {'cmd': cmd, 'status': exit_status}
- if stderr_data:
- details += (' Last stderr data: "%s".') % stderr_data
- raise SSHError(details)
- return exit_status
-
- def execute(self, cmd, stdin=None, timeout=3600, sudo=False):
- """Execute the specified command on the server.
- :param cmd: Command to be executed.
- :param stdin: Open file to be sent on process stdin.
- :param timeout: Timeout for execution of the command.
- Return tuple (exit_status, stdout, stderr)
- """
- stdout = StringIO.StringIO()
- stderr = StringIO.StringIO()
-
- exit_status = self.run(cmd, stderr=stderr,
- stdout=stdout, stdin=stdin,
- timeout=timeout, raise_on_error=False, sudo=sudo)
- stdout.seek(0)
- stderr.seek(0)
- return (exit_status, stdout.read(), stderr.read())
-
- def wait(self, timeout=120, interval=1):
- """Wait for the host will be available via ssh."""
- start_time = time.time()
- while True:
- try:
- return self.execute('uname')
- except (socket.error, SSHError):
- time.sleep(interval)
- if time.time() > (start_time + timeout):
- raise SSHTimeout(
- ('Timeout waiting for "%s"') %
- self.ssh_access.host)
-
-
-class SubprocessTimeout(Exception):
- pass
-
-
-class Subprocess(Connection):
-
- """Represent subprocess connection."""
-
- def execute(self, cmd, stdin=None, timeout=3600):
- process = subprocess.Popen(shlex.split(cmd), stderr=subprocess.PIPE,
- stdout=subprocess.PIPE,
- shell=True)
- timer = threading.Timer(timeout, process.kill)
- stdout, stderr = process.communicate(input=stdin)
- status = process.wait()
- if timer.is_alive():
- timer.cancel()
- raise SubprocessTimeout('Timeout executing command "%(cmd)s"')
- return (status, stdout, stderr)
-
-
-##################################################
-# Only invoke the module directly for test purposes. Should be
-# invoked from pns script.
-##################################################
-def main():
- # As argument pass the SSH access string, e.g. "localadmin@1.1.1.1:secret"
- test_ssh = SSH(SSHAccess(sys.argv[1]))
-
- print 'ID=' + test_ssh.distro_id
- print 'ID_LIKE=' + test_ssh.distro_id_like
- print 'VERSION_ID=' + test_ssh.distro_version
-
- # ssh.wait()
- # print ssh.pidof('bash')
- # print ssh.stat('/tmp')
- print test_ssh.check_openstack_version()
- print test_ssh.get_cpu_info()
- print test_ssh.get_l2agent_version("Open vSwitch agent")
-
-if __name__ == "__main__":
- main()
diff --git a/nfvbench/nfvbench.py b/nfvbench/nfvbench.py
index cee54f0..920838a 100644
--- a/nfvbench/nfvbench.py
+++ b/nfvbench/nfvbench.py
@@ -295,9 +295,6 @@ def parse_opts_from_cli():
action='store',
help='Traffic generator profile to use')
- parser.add_argument('-i', '--image', dest='image_name',
- action='store',
- help='VM image name to use')
parser.add_argument('-0', '--no-traffic', dest='no_traffic',
default=None,
diff --git a/requirements.txt b/requirements.txt
index 90b491d..0a67060 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=1.10.0,<2.0
+pbr>=3.1.1
attrdict>=2.0.0
bitmath>=1.3.1.1
diff --git a/setup.py b/setup.py
index 36eead0..bf772f3 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-
from setuptools.command.test import test
from setuptools import setup
import sys
diff --git a/test-requirements.txt b/test-requirements.txt
index a59e519..f72ef01 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -14,4 +14,4 @@ oslotest>=1.10.0 # Apache-2.0
testrepository>=0.0.18
testscenarios>=0.4
testtools>=1.4.0
-pytest>=3.0.2
+pytest>=3.2.2
diff --git a/tox.ini b/tox.ini
index 3f08b99..3120ac1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,7 +10,6 @@ setenv =
VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
-changedir=test
commands = py.test -q -s --basetemp={envtmpdir} {posargs}
[testenv:pep8]
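With changedir removed, py.test now runs from tox's default working directory (the project root), which is consistent with the deletion of the root-level conftest.py in this change. A minimal sketch of invoking the environments locally (only the pep8 environment is visible in this diff; the default envlist is assumed):

.. code-block:: bash

    # run the default tox environments from the repository root
    tox
    # run only the style checks
    tox -e pep8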