-rw-r--r--   docker/Dockerfile                                  |  12
-rwxr-xr-x   docker/nfvbench-entrypoint.sh                      |   2
-rw-r--r--   docs/testing/user/userguide/quickstart_docker.rst  | 366
-rw-r--r--   docs/testing/user/userguide/server.rst             |  30
-rw-r--r--   nfvbench/nfvbench.py                               |  45
-rw-r--r--   nfvbench/nfvbenchd.py                              |  78
-rw-r--r--   nfvbench/stats_collector.py                        |  51
-rw-r--r--   requirements.txt                                   |   4
8 files changed, 278 insertions, 310 deletions
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1980575..f46e3d0 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -4,6 +4,9 @@ FROM ubuntu:16.04
ENV TREX_VER "v2.53"
ENV VM_IMAGE_VER "0.6"
+# Note: do not clone with --depth 1 as it will cause pbr to fail extracting the nfvbench version
+# from the git tag
+
RUN apt-get update && apt-get install -y \
git \
kmod \
@@ -14,7 +17,6 @@ RUN apt-get update && apt-get install -y \
wget \
net-tools \
libelf1 \
- && mkdir /tmp/http_root \
&& mkdir -p /opt/trex \
&& mkdir /var/log/nfvbench \
&& wget --no-cache https://trex-tgn.cisco.com/trex/release/$TREX_VER.tar.gz \
@@ -24,11 +26,13 @@ RUN apt-get update && apt-get install -y \
&& cp -a /opt/trex/$TREX_VER/automation/trex_control_plane/interactive/trex /usr/local/lib/python2.7/dist-packages/ \
&& rm -rf /opt/trex/$TREX_VER/automation/trex_control_plane/interactive/trex \
&& sed -i -e "s/2048 /512 /" -e "s/2048\"/512\"/" /opt/trex/$TREX_VER/trex-cfg \
- && pip install -U pip pbr \
- && hash -r pip \
+ && apt-get remove -y python-pip \
+ && wget https://bootstrap.pypa.io/get-pip.py \
+ && python get-pip.py \
+ && pip install -U pbr \
&& pip install -U setuptools \
&& cd / \
- && git clone --depth 1 https://gerrit.opnfv.org/gerrit/nfvbench \
+ && git clone https://gerrit.opnfv.org/gerrit/nfvbench \
&& cd /nfvbench && pip install -e . \
&& wget -O nfvbenchvm-$VM_IMAGE_VER.qcow2 http://artifacts.opnfv.org/nfvbench/images/nfvbenchvm_centos-$VM_IMAGE_VER.qcow2 \
&& python ./docker/cleanup_generators.py \
diff --git a/docker/nfvbench-entrypoint.sh b/docker/nfvbench-entrypoint.sh
index ed98ced..a7195a3 100755
--- a/docker/nfvbench-entrypoint.sh
+++ b/docker/nfvbench-entrypoint.sh
@@ -17,7 +17,7 @@
if [ -z "$1" ] || [ $1 != 'start_rest_server' ]; then
tail -f /dev/null
else
- PARAMS="--server /tmp/http_root"
+ PARAMS="--server"
if [ -n "$HOST" ]; then
PARAMS+=" --host $HOST"
fi
diff --git a/docs/testing/user/userguide/quickstart_docker.rst b/docs/testing/user/userguide/quickstart_docker.rst
index 6803bc3..adffaa3 100644
--- a/docs/testing/user/userguide/quickstart_docker.rst
+++ b/docs/testing/user/userguide/quickstart_docker.rst
@@ -11,6 +11,9 @@ NFVbench Installation and Quick Start Guide
Make sure you satisfy the `hardware and software requirements <requirements>` before you start .
+NFVbench can be used in CLI mode or in REST server mode.
+In CLI mode, benchmarks are launched directly from the command line; in REST server mode, they are requested through a REST interface.
+
1. Container installation
-------------------------
@@ -20,113 +23,137 @@ To pull the latest NFVbench container image:
docker pull opnfv/nfvbench
-2. Docker Container configuration
----------------------------------
+2. NFVbench configuration file
+------------------------------
-The NFVbench container requires the following Docker options to operate properly.
+Create a directory under $HOME called nfvbench to store the minimal configuration file:
-+-------------------------------------------------------+-------------------------------------------------------+
-| Docker options | Description |
-+=======================================================+=======================================================+
-| -v /lib/modules/$(uname -r):/lib/modules/$(uname -r) | needed by kernel modules in the container |
-+-------------------------------------------------------+-------------------------------------------------------+
-| -v /usr/src/kernels:/usr/src/kernels | needed by TRex to build kernel modules when needed |
-+-------------------------------------------------------+-------------------------------------------------------+
-| -v /dev:/dev | needed by kernel modules in the container |
-+-------------------------------------------------------+-------------------------------------------------------+
-| -v $PWD:/tmp/nfvbench | optional but recommended to pass files between the |
-| | host and the docker space (see examples below) |
-| | Here we map the current directory on the host to the |
-| | /tmp/nfvbench director in the container but any |
-| | other similar mapping can work as well |
-+-------------------------------------------------------+-------------------------------------------------------+
-| --net=host | (optional) needed if you run the NFVbench |
-| | server in the container (or use any appropriate |
-| | docker network mode other than "host") |
-+-------------------------------------------------------+-------------------------------------------------------+
-| --privileged | (optional) required if SELinux is enabled on the host |
-+-------------------------------------------------------+-------------------------------------------------------+
-| -e HOST="127.0.0.1" | (optional) required if REST server is enabled |
-+-------------------------------------------------------+-------------------------------------------------------+
-| -e PORT=7556 | (optional) required if REST server is enabled |
-+-------------------------------------------------------+-------------------------------------------------------+
-| -e CONFIG_FILE="/root/nfvbenchconfig.json | (optional) required if REST server is enabled |
-+-------------------------------------------------------+-------------------------------------------------------+
+.. code-block:: bash
-It can be convenient to write a shell script (or an alias) to automatically insert the necessary options.
+ mkdir $HOME/nfvbench
-The minimal configuration file required must specify the PCI addresses of the 2 NIC ports to use.
-If OpenStack is used, the openrc_file property must be defined to point to a valid OpenStack rc file.
+Create a new file containing the minimal configuration for NFVbench. It can have any name, for example "nfvbench.cfg"; paste the following yaml template into it:
+
+.. code-block:: bash
+
+ openrc_file: /tmp/nfvbench/openrc
+ traffic_generator:
+ generator_profile:
+ - name: trex-local
+ tool: TRex
+ ip: 127.0.0.1
+ cores: 3
+ software_mode: false,
+ interfaces:
+ - port: 0
+ pci: "0a:00.0"
+ - port: 1
+ pci: "0a:00.1"
+ intf_speed:
+
+If OpenStack is not used, the openrc_file property can be removed.
-Here is an example of mimimal configuration using OpenStack where:
+
+If OpenStack is used, the openrc_file property must contain the container pathname of the OpenStack ``openrc`` file used to connect to OpenStack through the OpenStack API.
+This file can be downloaded from the OpenStack Horizon dashboard (refer to the OpenStack documentation on how to retrieve the openrc file).
+We will map the host $HOME/nfvbench directory to the container /tmp/nfvbench directory and name the file "openrc",
+so the pathname seen from the container will be "/tmp/nfvbench/openrc" (see container file pathname mapping in the next sections).
-- the openrc file is located on the host current directory which is mapped under /tmp/nfvbench in the container (this is achieved using -v $PWD:/tmp/nfvbench)
-- the 2 NIC ports to use for generating traffic have the PCI addresses "04:00.0" and "04:00.1"
+The PCI addresses of the 2 physical interfaces that will be used by the traffic generator must be configured.
+The PCI addresses can be obtained using the "lspci" Linux command, for example:
.. code-block:: bash
- {
- "openrc_file": "/tmp/nfvbench/openrc",
- "traffic_generator": {
- "generator_profile": [
- {
- "interfaces": [
- {
- "pci": "04:00.0",
- "port": 0,
- },
- {
- "pci": "04:00.1",
- "port": 1,
- }
- ],
- "intf_speed": "",
- "ip": "127.0.0.1",
- "name": "trex-local",
- "software_mode": false,
- "tool": "TRex"
- }
- ]
- }
- }
+ [root@sjc04-pod6-build ~]# lspci | grep 710
+ 0a:00.0 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
+ 0a:00.1 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
+ 0a:00.2 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
+ 0a:00.3 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
-The other options in the minimal configuration must be present and must have the same values as above.
+In the above example, the PCI addresses "0a:00.0" and "0a:00.1" (first 2 ports of the quad port NIC) are used.
-3. Start the Docker container
------------------------------
-As for any Docker container, you can execute NFVbench measurement sessions using a temporary container ("docker run" - which exits after each NFVbench run)
-or you can decide to run the NFVbench container in the background then execute one or more NFVbench measurement sessions on that container ("docker exec").
+.. warning::
+
+   The PCI addresses must be quoted as shown in the above example, otherwise TRex will parse them incorrectly.
+   The other fields in the minimal configuration must be present and must have the same values as above.
-The former approach is simpler to manage (since each container is started and terminated after each command) but incurs a small delay at start time (several seconds).
-The second approach is more responsive as the delay is only incurred once when starting the container.
-We will take the second approach and start the NFVbench container in detached mode with the name "nfvbench" (this works with bash, prefix with "sudo" if you do not use the root login)
+3. Starting NFVbench in CLI mode
+--------------------------------
-First create a new working directory, and change the current working directory to there. A "nfvbench_ws" directory under your home directory is good place for that, and this is where the OpenStack RC file and NFVbench config file will sit.
+In this mode, the NFVbench code resides in a container that runs in the background but does not execute anything on its own.
+An alias is then used to invoke each NFVbench benchmark run through docker exec.
+The $HOME/nfvbench directory on the host is mapped to /tmp/nfvbench in the container to facilitate file sharing between the 2 environments.
-To run NFVBench without server mode
+Start NFVbench container
+~~~~~~~~~~~~~~~~~~~~~~~~
+The NFVbench container can be started using the docker run command or using docker compose.
+
+To run NFVbench in CLI mode using docker run:
.. code-block:: bash
- cd ~/nfvbench_ws
- docker run --detach --net=host --privileged -v $PWD:/tmp/nfvbench -v /dev:/dev -v /lib/modules/$(uname -r):/lib/modules/$(uname -r) -v /usr/src/kernels:/usr/src/kernels --name nfvbench opnfv/nfvbench
+ docker run --name nfvbench --detach --privileged -v /lib/modules/$(uname -r):/lib/modules/$(uname -r) -v /usr/src/kernels:/usr/src/kernels -v /dev:/dev -v $HOME/nfvbench:/tmp/nfvbench opnfv/nfvbench
-To run NFVBench enabling REST server (mount the configuration json and the path for openrc)
++-------------------------------------------------------+-------------------------------------------------------+
+| Docker options | Description |
++=======================================================+=======================================================+
+| --name nfvbench | container name is "nfvbench" |
++-------------------------------------------------------+-------------------------------------------------------+
+| --detach | run container in background |
++-------------------------------------------------------+-------------------------------------------------------+
+| --privileged | (optional) required if SELinux is enabled on the host |
++-------------------------------------------------------+-------------------------------------------------------+
+| -v /lib/modules:/lib/modules | needed by kernel modules in the container |
++-------------------------------------------------------+-------------------------------------------------------+
+| -v /usr/src/kernels:/usr/src/kernels | needed by TRex to build kernel modules when needed |
++-------------------------------------------------------+-------------------------------------------------------+
+| -v /dev:/dev | needed by kernel modules in the container |
++-------------------------------------------------------+-------------------------------------------------------+
+| -v $HOME/nfvbench:/tmp/nfvbench | folder mapping to pass files between the |
+| | host and the docker space (see examples below) |
+| | Here we map the $HOME/nfvbench directory on the host |
+| | to the /tmp/nfvbench directory in the container. |
+| | Any other mapping can work as well |
++-------------------------------------------------------+-------------------------------------------------------+
+| opnfv/nfvbench | container image name |
++-------------------------------------------------------+-------------------------------------------------------+
+
+To run NFVbench using docker compose, create the docker-compose.yml file and paste the following content:
+
+.. code-block:: bash
+
+ version: '3'
+ services:
+ nfvbench:
+ image: "opnfv/nfvbench"
+ container_name: "nfvbench"
+ volumes:
+ - /dev:/dev
+ - /usr/src/kernels:/usr/src/kernels
+ - /lib/modules:/lib/modules
+ - ${HOME}/nfvbench:/tmp/nfvbench
+ network_mode: "host"
+ privileged: true
+
+Then start the container in detached mode:
.. code-block:: bash
- cd ~/nfvbench_ws
- docker run --detach --net=host --privileged -e HOST="127.0.0.1" -e PORT=7556 -e CONFIG_FILE="/tmp/nfvbench/nfvbenchconfig.json -v $PWD:/tmp/nfvbench -v /dev:/dev -v /lib/modules/$(uname -r):/lib/modules/$(uname -r) -v /usr/src/kernels:/usr/src/kernels --name nfvbench opnfv/nfvbench start_rest_server
+ docker-compose up -d
+Requesting an NFVbench benchmark run
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The create an alias to make it easy to execute nfvbench commands directly from the host shell prompt:
+Create an alias to make it easy to execute nfvbench commands directly from the host shell prompt:
.. code-block:: bash
alias nfvbench='docker exec -it nfvbench nfvbench'
-The next to last "nfvbench" refers to the name of the container while the last "nfvbench" refers to the NFVbench binary that is available to run in the container.
+The next to last "nfvbench" refers to the name of the container, while the last "nfvbench" refers to the NFVbench binary that is available to run inside the container.
+
+Once the alias is set, NFVbench runs can simply be requested from the command line using "nfvbench <options>".
To verify it is working:
@@ -135,102 +162,165 @@ To verify it is working:
nfvbench --version
nfvbench --help
+Example of run
+~~~~~~~~~~~~~~
-4. NFVbench configuration
--------------------------
+To do a single run at 10,000pps bi-directional (or 5kpps in each direction) using the PVP packet path:
-Create a new file containing the minimal configuration for NFVbench, we can call it any name, for example "my_nfvbench.cfg" and paste the following yaml template in the file:
+.. code-block:: bash
+
+ nfvbench -c /tmp/nfvbench/nfvbench.cfg --rate 10kpps
+
+NFVbench options used:
+
+* ``-c /tmp/nfvbench/nfvbench.cfg`` : specify the config file to use
+* ``--rate 10kpps`` : specify the packet rate for the test (both directions combined) using the kpps unit (thousands of packets per second)
+
+
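
The results are displayed on the console. To also keep them for later processing, they can be written as a JSON file into the shared directory; this is a sketch assuming the ``--json`` option (the file lands in $HOME/nfvbench on the host since that directory is mapped to /tmp/nfvbench):

.. code-block:: bash

   # same run, additionally saving the detailed results to a JSON file
   nfvbench -c /tmp/nfvbench/nfvbench.cfg --rate 10kpps --json /tmp/nfvbench/results.json
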
+Retrieve complete configuration file as template
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To use more advanced configuration options, the full configuration file template with comments (yaml format) can be obtained using the --show-default-config option:
.. code-block:: bash
- openrc_file:
- traffic_generator:
- generator_profile:
- - name: trex-local
- tool: TRex
- ip: 127.0.0.1
- cores: 3
- software_mode: false,
- interfaces:
- - port: 0
- pci:
- - port: 1
- pci:
- intf_speed:
+ nfvbench --show-default-config > $HOME/nfvbench/full_nfvbench.cfg
-If OpenStack is used, NFVbench requires an ``openrc`` file to connect to OpenStack using the OpenStack API. This file can be downloaded from the OpenStack Horizon dashboard (refer to the OpenStack documentation on how to
-retrieve the openrc file). The file pathname in the container must be stored in the "openrc_file" property. If it is stored on the host in the current directory, its full pathname must start with /tmp/nfvbench (since the current directory is mapped to /tmp/nfvbench in the container).
+Edit the full_nfvbench.cfg file to only keep those properties that need to be modified (preserving the nesting).
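
As an illustration, a trimmed file overriding a couple of properties could look like the sketch below. The property names and values shown are examples only (check full_nfvbench.cfg for the exact names available); what matters is that the nesting matches the template:

.. code-block:: bash

   # illustrative override file: keep only the properties to change,
   # preserving the same nesting as in full_nfvbench.cfg (names are examples)
   duration_sec: 30
   traffic:
     bidirectional: true
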
-If OpenStack is not used, remove the openrc_file property.
-The PCI address of the 2 physical interfaces that will be used by the traffic generator must be configured.
-The PCI address can be obtained for example by using the "lspci" Linux command. For example:
+4. Start NFVbench in REST server mode
+-------------------------------------
+In this mode, the NFVbench REST server will run in the container.
+The $HOME/nfvbench directory on the host is mapped to /tmp/nfvbench in the container to facilitate file sharing between the 2 environments.
+
+Start NFVbench container
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+To start the NFVbench container with the REST server using the docker run CLI:
.. code-block:: bash
- [root@sjc04-pod6-build ~]# lspci | grep 710
- 0a:00.0 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
- 0a:00.1 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
- 0a:00.2 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
- 0a:00.3 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01)
+ docker run --name nfvbench --detach --privileged --net=host -e CONFIG_FILE="/tmp/nfvbench/nfvbench.cfg" -v /lib/modules/$(uname -r):/lib/modules/$(uname -r) -v /usr/src/kernels:/usr/src/kernels -v /dev:/dev -v $HOME/nfvbench:/tmp/nfvbench opnfv/nfvbench start_rest_server
+
+REST mode requires the same docker options as CLI mode and adds the following ones:
+
++-------------------------------------------------------+-------------------------------------------------------+
+| Docker options | Description |
++=======================================================+=======================================================+
+| --net=host | use "host" docker networking mode |
+| | Other modes (such as NAT) could be used if required |
+| | with proper adjustment of the port to use for REST |
++-------------------------------------------------------+-------------------------------------------------------+
+| -e CONFIG_FILE="/tmp/nfvbench/nfvbench.cfg" | (optional) |
+| | specify the initial NFVbench config file to use. |
+| | defaults to none |
++-------------------------------------------------------+-------------------------------------------------------+
+| start_rest_server | to request a REST server to run in background in the |
+| | container |
++-------------------------------------------------------+-------------------------------------------------------+
+| -e HOST="127.0.0.1" | (optional) |
+| | specify the IP address to listen to. |
+| | defaults to 127.0.0.1 |
++-------------------------------------------------------+-------------------------------------------------------+
+| -e PORT=7555 | (optional) |
+| | specify the port address to listen to. |
+| | defaults to 7555 |
++-------------------------------------------------------+-------------------------------------------------------+
+
+The initial configuration file is optional but is handy to define mandatory deployment parameters that are common to all subsequent REST requests.
+If this initial configuration file is not passed at container start time, the corresponding configuration properties must be included in every REST request.
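
For instance, if no initial configuration file was given at container start time, a run request can carry the needed properties directly in its JSON body. This is only a sketch: "rate" is taken from the examples below, while "duration_sec" is an illustrative name that must correspond to a valid NFVbench configuration option.

.. code-block:: bash

   # hedged sketch: pass configuration properties in the request body itself
   curl -H "Content-type: application/json" -X POST \
        -d '{"rate": "10kpps", "duration_sec": 30}' \
        http://127.0.0.1:7555/start_run
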
-Example of edited configuration with an OpenStack RC file stored in the current directory with the "openrc" name, and
-PCI addresses "0a:00.0" and "0a:00.1" (first 2 ports of the quad port NIC):
+To start the NFVbench container with REST server using docker compose, use the following compose file:
.. code-block:: bash
- openrc_file: /tmp/nfvbench/openrc
- traffic_generator:
- generator_profile:
- - name: trex-local
- tool: TRex
- ip: 127.0.0.1
- cores: 3
- software_mode: false,
- interfaces:
- - port: 0
- switch_port:
- pci: "0a:00.0"
- - port: 1
- switch_port:
- pci: "0a:00.1"
- intf_speed:
+ version: '3'
+ services:
+ nfvbench:
+ image: "opnfv/nfvbench"
+ container_name: "nfvbench_server"
+ command: start_rest_server
+ volumes:
+ - /dev:/dev
+ - /usr/src/kernels:/usr/src/kernels
+ - /lib/modules:/lib/modules
+ - ${HOME}/nfvbench:/tmp/nfvbench
+ network_mode: "host"
+ environment:
+ - HOST="127.0.0.1"
+ - PORT=7555
+ privileged: true
+
+Requesting an NFVbench benchmark run
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+To request a benchmark run, you must create a JSON document that describes the benchmark and send it to the NFVbench server in the body of a POST request.
+
+
+Examples of REST requests
+~~~~~~~~~~~~~~~~~~~~~~~~~
+In this example, we will use curl to interact with the NFVbench REST server.
+
+Query the NFVbench version:
-.. warning::
+.. code-block:: bash
- You have to put quotes around the pci addresses as shown in the above example, otherwise TRex will read it wrong.
+ [root@sjc04-pod3-mgmt ~]# curl -G http://127.0.0.1:7555/version
+ 3.1.1
-Alternatively, the full template with comments can be obtained using the --show-default-config option in yaml format:
+This is the JSON for a fixed rate run at 10,000pps bi-directional (or 5kpps in each direction) using the PVP packet path:
.. code-block:: bash
- nfvbench --show-default-config > my_nfvbench.cfg
+ {"rate": "10kpps"}
-Edit the nfvbench.cfg file to only keep those properties that need to be modified (preserving the nesting).
+This is the curl command to send this benchmark request to the NFVbench server:
-Make sure you have your nfvbench configuration file (my_nfvbench.cfg) and - if OpenStack is used - OpenStack RC file in your pre-created working directory.
+.. code-block:: bash
+ [root@sjc04-pod3-mgmt ~]# curl -H "Accept: application/json" -H "Content-type: application/json" -X POST -d '{"rate": "10kpps"}' http://127.0.0.1:7555/start_run
+ {
+ "error_message": "nfvbench run still pending",
+ "status": "PENDING"
+ }
+ [root@sjc04-pod3-mgmt ~]#
-5. Run NFVbench
----------------
+This request will return immediately with status set to "PENDING" if the request was started successfully.
-To do a single run at 10,000pps bi-directional (or 5kpps in each direction) using the PVP packet path:
+The status can be polled until the run completes. Here the poll returns a "PENDING" status, indicating the run is not yet completed:
.. code-block:: bash
- nfvbench -c /tmp/nfvbench/my_nfvbench.cfg --rate 10kpps
+ [root@sjc04-pod3-mgmt ~]# curl -G http://127.0.0.1:7555/status
+ {
+ "error_message": "nfvbench run still pending",
+ "status": "PENDING"
+ }
+ [root@sjc04-pod3-mgmt ~]#
-NFVbench options used:
+Finally, the status request returns an "OK" status along with the full results (truncated here):
-* ``-c /tmp/nfvbench/my_nfvbench.cfg`` : specify the config file to use (this must reflect the file path from inside the container)
-* ``--rate 10kpps`` : specify rate of packets for test for both directions using the kpps unit (thousands of packets per second)
+.. code-block:: bash
-This should produce a result similar to this (a simple run with the above options should take less than 5 minutes):
+ [root@sjc04-pod3-mgmt ~]# curl -G http://127.0.0.1:7555/status
+ {
+ "result": {
+ "benchmarks": {
+ "network": {
+ "service_chain": {
+ "PVP": {
+ "result": {
+ "bidirectional": true,
+
+ ...
+
+ "status": "OK"
+ }
+ [root@sjc04-pod3-mgmt ~]#
-.. code-block:: none
- [TBP]
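
Putting the two requests together, the polling can be scripted. This is a minimal sketch assuming the REST server listens on the default 127.0.0.1:7555 and that the status reply contains "PENDING" exactly as shown above:

.. code-block:: bash

   # start a fixed rate run, then poll /status until it is no longer PENDING
   curl -H "Content-type: application/json" -X POST -d '{"rate": "10kpps"}' http://127.0.0.1:7555/start_run
   until out=$(curl -s -G http://127.0.0.1:7555/status) && \
         ! echo "$out" | grep -q '"status": "PENDING"'; do
       sleep 10
   done
   echo "$out"
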
+Retrieve complete configuration file as template
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7. Terminating the NFVbench container
diff --git a/docs/testing/user/userguide/server.rst b/docs/testing/user/userguide/server.rst
index 921e3bc..806927b 100644
--- a/docs/testing/user/userguide/server.rst
+++ b/docs/testing/user/userguide/server.rst
@@ -5,36 +5,6 @@
NFVbench Server mode and NFVbench client API
============================================
-NFVbench can run as an HTTP server to:
-
-- optionally provide access to any arbitrary HTLM files (HTTP server function) - this is optional
-- service fully parameterized aynchronous run requests using the HTTP protocol (REST/json with polling)
-- service fully parameterized run requests with interval stats reporting using the WebSocket/SocketIO protocol.
-
-Start the NFVbench server
--------------------------
-To run in server mode, simply use the --server <http_root_path> and optionally the listen address to use (--host <ip>, default is 0.0.0.0) and listening port to use (--port <port>, default is 7555).
-
-
-If HTTP files are to be serviced, they must be stored right under the http root path.
-This root path must contain a static folder to hold static files (css, js) and a templates folder with at least an index.html file to hold the template of the index.html file to be used.
-This mode is convenient when you do not already have a WEB server hosting the UI front end.
-If HTTP files servicing is not needed (REST only or WebSocket/SocketIO mode), the root path can point to any dummy folder.
-
-Once started, the NFVbench server will be ready to service HTTP or WebSocket/SocketIO requests at the advertised URL.
-
-Example of NFVbench server start in a container:
-
-.. code-block:: bash
-
- # get to the container shell (assume the container name is "nfvbench")
- docker exec -it nfvbench bash
- # from the container shell start the NFVbench server in the background
- nfvbench -c /tmp/nfvbench/nfvbench.cfg --server /tmp &
- # exit container
- exit
-
-
HTTP Interface
--------------
diff --git a/nfvbench/nfvbench.py b/nfvbench/nfvbench.py
index cdb99c8..bb73d68 100644
--- a/nfvbench/nfvbench.py
+++ b/nfvbench/nfvbench.py
@@ -36,7 +36,7 @@ import credentials as credentials
from fluentd import FluentLogHandler
import log
from log import LOG
-from nfvbenchd import WebSocketIoServer
+from nfvbenchd import WebServer
from specs import ChainType
from specs import Specs
from summarizer import NFVBenchSummarizer
@@ -71,6 +71,11 @@ class NFVBench(object):
self.notifier = notifier
def run(self, opts, args):
+ """This run() method is called for every NFVbench benchmark request.
+
+ In CLI mode, this method is called only once per invocation.
+ In REST server mode, this is called once per REST POST request
+ """
status = NFVBench.STATUS_OK
result = None
message = ''
@@ -82,6 +87,12 @@ class NFVBench(object):
try:
# recalc the running config based on the base config and options for this run
self._update_config(opts)
+
+ # check that an empty openrc file (no OpenStack) is only allowed
+ # with EXT chain
+ if not self.config.openrc_file and self.config.service_chain != ChainType.EXT:
+ raise Exception("openrc_file in the configuration is required for PVP/PVVP chains")
+
self.specs.set_run_spec(self.config_plugin.get_run_spec(self.config,
self.specs.openstack))
self.chain_runner = ChainRunner(self.config,
@@ -239,10 +250,8 @@ def _parse_opts_from_cli():
parser.add_argument('--server', dest='server',
default=None,
- action='store',
- metavar='<http_root_pathname>',
- help='Run nfvbench in server mode and pass'
- ' the HTTP root folder full pathname')
+ action='store_true',
+ help='Run nfvbench in server mode')
parser.add_argument('--host', dest='host',
action='store',
@@ -574,14 +583,6 @@ def main():
print json.dumps(config, sort_keys=True, indent=4)
sys.exit(0)
- # check that an empty openrc file (no OpenStack) is only allowed
- # with EXT chain
- if not config.openrc_file:
- if config.service_chain == ChainType.EXT:
- LOG.info('EXT chain with OpenStack mode disabled')
- else:
- raise Exception("openrc_file is empty in the configuration and is required")
-
# update the config in the config plugin as it might have changed
# in a copy of the dict (config plugin still holds the original dict)
config_plugin.set_config(config)
@@ -599,18 +600,14 @@ def main():
nfvbench_instance = NFVBench(config, openstack_spec, config_plugin, factory)
if opts.server:
- if os.path.isdir(opts.server):
- server = WebSocketIoServer(opts.server, nfvbench_instance, fluent_logger)
- nfvbench_instance.set_notifier(server)
- try:
- port = int(opts.port)
- except ValueError:
- server.run(host=opts.host)
- else:
- server.run(host=opts.host, port=port)
+ server = WebServer(nfvbench_instance, fluent_logger)
+ try:
+ port = int(opts.port)
+ except ValueError:
+ server.run(host=opts.host)
else:
- print 'Invalid HTTP root directory: ' + opts.server
- sys.exit(1)
+ server.run(host=opts.host, port=port)
+ # server.run() should never return
else:
with utils.RunLock():
run_summary_required = True
diff --git a/nfvbench/nfvbenchd.py b/nfvbench/nfvbenchd.py
index fa781af..ae89e7a 100644
--- a/nfvbench/nfvbenchd.py
+++ b/nfvbench/nfvbenchd.py
@@ -16,37 +16,31 @@
import json
import Queue
+from threading import Thread
import uuid
from flask import Flask
from flask import jsonify
-from flask import render_template
from flask import request
-from flask_socketio import emit
-from flask_socketio import SocketIO
from summarizer import NFVBenchSummarizer
from log import LOG
from utils import byteify
from utils import RunLock
-# this global cannot reside in Ctx because of the @app and @socketio decorators
-app = None
-socketio = None
+from __init__ import __version__
STATUS_OK = 'OK'
STATUS_ERROR = 'ERROR'
STATUS_PENDING = 'PENDING'
STATUS_NOT_FOUND = 'NOT_FOUND'
-
def result_json(status, message, request_id=None):
body = {
'status': status,
'error_message': message
}
-
if request_id is not None:
body['request_id'] = request_id
@@ -66,15 +60,13 @@ class Ctx(object):
run_queue = Queue.Queue()
busy = False
result = None
- request_from_socketio = False
results = {}
ids = []
current_id = None
@staticmethod
- def enqueue(config, request_id, from_socketio=False):
+ def enqueue(config, request_id):
Ctx.busy = True
- Ctx.request_from_socketio = from_socketio
config['request_id'] = request_id
Ctx.run_queue.put(config)
@@ -129,40 +121,18 @@ class Ctx(object):
return Ctx.current_id
-def setup_flask(root_path):
- global socketio
- global app
+def setup_flask():
app = Flask(__name__)
- app.root_path = root_path
- socketio = SocketIO(app, async_mode='threading')
busy_json = result_json(STATUS_ERROR, 'there is already an NFVbench request running')
not_busy_json = result_json(STATUS_ERROR, 'no pending NFVbench run')
not_found_msg = 'results not found'
pending_msg = 'NFVbench run still pending'
- # --------- socketio requests ------------
-
- @socketio.on('start_run')
- def _socketio_start_run(config):
- if not Ctx.is_busy():
- Ctx.enqueue(config, get_uuid(), from_socketio=True)
- else:
- emit('error', {'reason': 'there is already an NFVbench request running'})
-
- @socketio.on('echo')
- def _socketio_echo(config):
- emit('echo', config)
-
# --------- HTTP requests ------------
- @app.route('/')
- def _index():
- return render_template('index.html')
-
- @app.route('/echo', methods=['GET'])
- def _echo():
- config = request.json
- return jsonify(config)
+ @app.route('/version', methods=['GET'])
+ def _version():
+ return __version__
@app.route('/start_run', methods=['POST'])
def _start_run():
@@ -201,23 +171,24 @@ def setup_flask(root_path):
return jsonify(res)
return jsonify(not_busy_json)
+ return app
+
-class WebSocketIoServer(object):
- """This class takes care of the web socketio server, accepts websocket events, and sends back
- notifications using websocket events (send_ methods). Caller should simply create an instance
+class WebServer(object):
+ """This class takes care of the web server. Caller should simply create an instance
of this class and pass a runner object then invoke the run method
"""
- def __init__(self, http_root, runner, fluent_logger):
+ def __init__(self, runner, fluent_logger):
self.nfvbench_runner = runner
- setup_flask(http_root)
+ self.app = setup_flask()
self.fluent_logger = fluent_logger
- def run(self, host='127.0.0.1', port=7556):
+ def run(self, host, port):
- # socketio.run will not return so we need to run it in a background thread so that
+ # app.run will not return so we need to run it in a background thread so that
# the calling thread (main thread) can keep doing work
- socketio.start_background_task(target=socketio.run, app=app, host=host, port=port)
+ Thread(target=self.app.run, args=(host, port)).start()
# wait for run requests
# the runner must be executed from the main thread (Trex client library requirement)
@@ -238,11 +209,8 @@ class WebSocketIoServer(object):
results = result_json(STATUS_ERROR, str(exc))
LOG.exception('NFVbench runner exception:')
- if Ctx.request_from_socketio:
- socketio.emit('run_end', results)
- else:
- # this might overwrite a previously unfetched result
- Ctx.set_result(results)
+ # this might overwrite a previously unfetched result
+ Ctx.set_result(results)
try:
summary = NFVBenchSummarizer(results['result'], self.fluent_logger)
LOG.info(str(summary))
@@ -255,13 +223,3 @@ class WebSocketIoServer(object):
Ctx.release()
if self.fluent_logger:
self.fluent_logger.send_run_summary(True)
-
- def send_interval_stats(self, time_ms, tx_pps, rx_pps, drop_pct):
- stats = {'time_ms': time_ms, 'tx_pps': tx_pps, 'rx_pps': rx_pps, 'drop_pct': drop_pct}
- socketio.emit('run_interval_stats', stats)
-
- def send_ndr_found(self, ndr_pps):
- socketio.emit('ndr_found', {'rate_pps': ndr_pps})
-
- def send_pdr_found(self, pdr_pps):
- socketio.emit('pdr_found', {'rate_pps': pdr_pps})
diff --git a/nfvbench/stats_collector.py b/nfvbench/stats_collector.py
index 964d704..dc750db 100644
--- a/nfvbench/stats_collector.py
+++ b/nfvbench/stats_collector.py
@@ -56,9 +56,7 @@ class IntervalCollector(StatsCollector):
self.notifier = notifier
def add(self, stats):
- if self.notifier:
- current_stats = self.__compute_tx_rx_diff(stats)
- self.notifier.send_interval_stats(**current_stats)
+ pass
def reset(self):
# don't reset time!
@@ -66,52 +64,7 @@ class IntervalCollector(StatsCollector):
self.last_tx_pkts = 0
def add_ndr_pdr(self, tag, stats):
- if self.notifier:
-
- current_time = self._get_current_time_diff()
- rx_pps = self._get_rx_pps(stats['tx_pps'], stats['drop_percentage'])
-
- self.last_tx_pkts = stats['tx_pps'] / 1000 * (current_time - self.last_time)
- self.last_rx_pkts = rx_pps / 1000 * (current_time - self.last_time)
- self.last_time = current_time
-
- # 'drop_pct' key is an unfortunate name, since in iteration stats it means
- # number of the packets. More suitable would be 'drop_percentage'.
- # FDS frontend depends on this key
- current_stats = {
- '{}_pps'.format(tag): stats['tx_pps'],
- 'tx_pps': stats['tx_pps'],
- 'rx_pps': rx_pps,
- 'drop_pct': stats['drop_percentage'],
- 'time_ms': current_time
- }
-
- self.notifier.send_interval_stats(time_ms=current_stats['time_ms'],
- tx_pps=current_stats['tx_pps'],
- rx_pps=current_stats['rx_pps'],
- drop_pct=current_stats['drop_pct'])
- if tag == 'ndr':
- self.notifier.send_ndr_found(stats['tx_pps'])
- else:
- self.notifier.send_pdr_found(stats['tx_pps'])
-
- def __compute_tx_rx_diff(self, stats):
- current_time = self._get_current_time_diff()
- tx_diff = stats['overall']['tx']['total_pkts'] - self.last_tx_pkts
- tx_pps = (tx_diff * 1000) / (current_time - self.last_time)
- rx_diff = stats['overall']['rx']['total_pkts'] - self.last_rx_pkts
- rx_pps = (rx_diff * 1000) / (current_time - self.last_time)
-
- self.last_rx_pkts = stats['overall']['rx']['total_pkts']
- self.last_tx_pkts = stats['overall']['tx']['total_pkts']
- self.last_time = current_time
-
- return {
- 'tx_pps': tx_pps,
- 'rx_pps': rx_pps,
- 'drop_pct': max(0.0, (1 - (float(rx_pps) / tx_pps)) * 100),
- 'time_ms': current_time
- }
+ pass
class IterationCollector(StatsCollector):
diff --git a/requirements.txt b/requirements.txt
index 0a67060..490864c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,8 +20,4 @@ pyzmq>=15.3.0
requests>=2.13.0
tabulate>=0.7.5
flask>=0.12
-flask_socketio>=2.8.3
-backports.ssl-match-hostname==3.5.0.1 # via websocket-client
-socketIO-client==0.7.2
-websocket-client==0.40.0 # via socketio-client
fluent-logger>=0.5.3