From 04a7de082bd221eae3c7004f4e0b99dfa4f8be91 Mon Sep 17 00:00:00 2001 From: ahothan Date: Fri, 28 Jul 2017 17:08:46 -0700 Subject: Initial code drop from Cisco Change-Id: Ie2993886dc8e95c5f73ccdb871add8b96ffcc849 Signed-off-by: ahothan --- docs/testing/user/userguide/_static/custom.css | 4 + docs/testing/user/userguide/_templates/layout.html | 5 + docs/testing/user/userguide/advanced.rst | 318 +++++++++++++++ docs/testing/user/userguide/conf.py | 344 ++++++++++++++++ docs/testing/user/userguide/examples.rst | 9 + docs/testing/user/userguide/faq.rst | 28 ++ docs/testing/user/userguide/hw_requirements.rst | 79 ++++ .../user/userguide/images/extchain-config.svg | 219 ++++++++++ .../user/userguide/images/nfvbench-npvp.svg | 107 +++++ .../testing/user/userguide/images/nfvbench-pvp.svg | 94 +++++ .../user/userguide/images/nfvbench-pvvp-inter.svg | 132 ++++++ .../user/userguide/images/nfvbench-pvvp-intra.svg | 114 ++++++ .../userguide/images/nfvbench-spirent-setup.svg | 170 ++++++++ .../user/userguide/images/nfvbench-trex-setup.svg | 170 ++++++++ docs/testing/user/userguide/index.rst | 30 ++ docs/testing/user/userguide/installation.rst | 14 + docs/testing/user/userguide/quickstart_docker.rst | 224 +++++++++++ docs/testing/user/userguide/readme.rst | 163 ++++++++ docs/testing/user/userguide/server.rst | 445 +++++++++++++++++++++ 19 files changed, 2669 insertions(+) create mode 100644 docs/testing/user/userguide/_static/custom.css create mode 100644 docs/testing/user/userguide/_templates/layout.html create mode 100644 docs/testing/user/userguide/advanced.rst create mode 100644 docs/testing/user/userguide/conf.py create mode 100644 docs/testing/user/userguide/examples.rst create mode 100644 docs/testing/user/userguide/faq.rst create mode 100644 docs/testing/user/userguide/hw_requirements.rst create mode 100644 docs/testing/user/userguide/images/extchain-config.svg create mode 100644 docs/testing/user/userguide/images/nfvbench-npvp.svg create mode 100644 
docs/testing/user/userguide/images/nfvbench-pvp.svg create mode 100644 docs/testing/user/userguide/images/nfvbench-pvvp-inter.svg create mode 100644 docs/testing/user/userguide/images/nfvbench-pvvp-intra.svg create mode 100644 docs/testing/user/userguide/images/nfvbench-spirent-setup.svg create mode 100644 docs/testing/user/userguide/images/nfvbench-trex-setup.svg create mode 100644 docs/testing/user/userguide/index.rst create mode 100644 docs/testing/user/userguide/installation.rst create mode 100644 docs/testing/user/userguide/quickstart_docker.rst create mode 100644 docs/testing/user/userguide/readme.rst create mode 100644 docs/testing/user/userguide/server.rst (limited to 'docs/testing/user/userguide') diff --git a/docs/testing/user/userguide/_static/custom.css b/docs/testing/user/userguide/_static/custom.css new file mode 100644 index 0000000..6cbfde3 --- /dev/null +++ b/docs/testing/user/userguide/_static/custom.css @@ -0,0 +1,4 @@ +.wy-nav-content { + max-width: 1200px !important; +} + diff --git a/docs/testing/user/userguide/_templates/layout.html b/docs/testing/user/userguide/_templates/layout.html new file mode 100644 index 0000000..f3387d5 --- /dev/null +++ b/docs/testing/user/userguide/_templates/layout.html @@ -0,0 +1,5 @@ +{% extends "!layout.html" %} +{% block extrahead %} + +{% endblock %} + diff --git a/docs/testing/user/userguide/advanced.rst b/docs/testing/user/userguide/advanced.rst new file mode 100644 index 0000000..f757b46 --- /dev/null +++ b/docs/testing/user/userguide/advanced.rst @@ -0,0 +1,318 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. SPDX-License-Identifier: CC-BY-4.0 +.. (c) Cisco Systems, Inc + +============== +Advanced Usage +============== + +This section covers a few examples on how to run NFVbench with multiple different settings. +Below are shown the most common and useful use-cases and explained some fields from a default config file. 
+ +How to change any NFVbench run configuration (CLI) +-------------------------------------------------- +NFVbench always starts with a default configuration which can further be partially refined (overridden) by the user from the CLI or from REST requests. + +At first have a look at the default config: + +.. code-block:: bash + + nfvbench --show-default-config + +It is sometimes useful derive your own configuration from a copy of the default config: + +.. code-block:: bash + + nfvbench --show-default-config > nfvbench.cfg + +At this point you can edit the copy by: + +- removing any parameter that is not to be changed (since NFVbench will always load the default configuration, default values are not needed) +- edit the parameters that are to be changed changed + +A run with the new confguration can then simply be requested using the -c option and by using the actual path of the configuration file +as seen from inside the container (in this example, we assume the current directory is mapped to /tmp/nfvbench in the container): + +.. code-block:: bash + + nfvbench -c /tmp/nfvbench/nfvbench.cfg + +The same -c option also accepts any valid yaml or json string to override certain parameters without having to create a configuration file. + +NFVbench also provides many configuration options as optional arguments. For example the number of flows can be specified using the --flow-count option. 
+ +For example, flow count can be specified in any of 3 ways: + +- by providing a confguration file that has the flow_count value to use (-c myconfig.yaml and myconfig.yaml contains 'flow_count: 100k') +- by passing that yaml paremeter inline (-c "flow_count: 100k") or (-c "{flow_count: 100k}") +- by using the flow count optional argument (--flow-count 100k) + +Showing the running configuration +--------------------------------- + +Because configuration parameters can be overriden, it is sometimes useful to show the final configuration (after all oevrrides are done) by using the --show-config option. +This final configuration is also called the "running" configuration. + +For example, this will only display the running configuration (without actually running anything): + +.. code-block:: bash + + nfvbench -c "{flow_count: 100k, debug: true}" --show-config + + +Connectivity and Configuration Check +------------------------------------ + +NFVbench allows to test connectivity to devices used with selected flow test, for example PVP. +It runs the whole test, but without actually sending any traffic or influencing interface counters. +It is also a good way to check if everything is configured properly in the config file and what versions of components are used. + + +To verify everything works without sending any traffic, use the --no-traffic option: + +.. code-block:: bash + + nfvbench --no-traffic + +Used parameters: + +* ``--no-traffic`` or ``-0`` : sending traffic from traffic generator is skipped + + + +Fixed Rate Run +-------------- + +Fixed rate run is the most basic type of NFVbench usage. It is usually used to verify that some amount of packets can pass network components in selected flow. + +The first example shows how to run PVP flow (default flow) with multiple different settings: + +.. 
code-block:: bash + + nfvbench -c nfvbench.cfg --no-reset --no-cleanup --rate 100000pps --duration 30 --interval 15 --json results.json + +Used parameters: + +* ``-c nfvbench.cfg`` : path to the config file +* ``--no-cleanup`` : resources (networks, VMs, attached ports) are not deleted after test is finished +* ``--rate 100000pps`` : defines rate of packets sent by traffic generator +* ``--duration 30`` : specifies how long should traffic be running in seconds +* ``--interval 15`` : stats are checked and shown periodically (in seconds) in this interval when traffic is flowing +* ``--json results.json`` : collected data are stored in this file after run is finished + +.. note:: It is your responsibility to clean up resources if needed when ``--no-cleanup`` parameter is used. + +The ``--json`` parameter makes it easy to store NFVbench results. To display collected results in a table form, do: + +.. code-block:: bash + + nfvbench --show-summary results.json # or shortcut -ss results.json + + +Second example aims to show how to specify which supported flow to run: + +.. code-block:: bash + + nfvbench -c nfvbench.cfg --rate 1Mbps --inter-node --service-chain PVVP + +Used parameters: + +* ``-c nfvbench.cfg`` : path to the config file +* ``--rate 1Mbps`` : defines rate of packets sent by traffic generator +* ``--inter-node`` : VMs are created on different compute nodes, works only with PVVP flow +* ``--service-chain PVVP`` or ``-sc PVVP`` : specifies type of flow to use, default is PVP + +.. note:: When parameter ``--inter-node`` is not used or there aren't enough compute nodes, VMs are on the same compute node. + + +Rate Units +^^^^^^^^^^ + +Parameter ``--rate`` accepts different types of values: + +* packets per second (pps, kpps, mpps), e.g. ``1000pps`` or ``10kpps`` +* load percentage (%), e.g. ``50%`` +* bits per second (bps, kbps, Mbps, Gbps), e.g. ``1Gbps``, ``1000bps`` +* NDR/PDR (ndr, pdr, ndr_pdr), e.g. 
``ndr_pdr`` + +The last mentioned value, NDR/PDR, is default one and its usage is covered more below. + + +NDR and PDR +----------- + +NDR and PDR test is used to determine performance of your setup, maximum packets throughput. + +* NDR (No Drop Rate): how many packets can be sent so (almost) none of them are dropped +* PDR (Partial Drop Rate): how many packets can be sent so drop rate is below given limit + +Config file contains section where settings for NDR/PDR can be set. +Increasing number of attempts helps to minimize a chance of traffic hiccups influencing result. +Other way of increasing precision is to specify longer duration for traffic to run. + +.. code-block:: bash + + # NDR/PDR configuration + measurement: + # Drop rates represent the ratio of dropped packet to the total number of packets sent. + # Values provided here are percentages. A value of 0.01 means that at most 0.01% of all + # packets sent are dropped (or 1 packet every 10,000 packets sent) + + # No Drop Rate; Default to 0.001% + NDR: 0.001 + # Partial Drop Rate; NDR should always be less than PDR + PDR: 0.1 + # The accuracy of NDR and PDR load percentiles; The actual load percentile that match NDR + # or PDR should be within `load_epsilon` difference than the one calculated. + load_epsilon: 0.1 + +Because NDR/PDR is the default ``--rate`` value, it's possible to run NFVbench simply like this: + +.. code-block:: bash + + nfvbench -c nfvbench.cfg + +Other custom run: + +.. code-block:: bash + + nfvbench -c nfvbench.cfg --duration 120 --json results.json + +Used parameters: + +* ``-c nfvbench.cfg`` : path to the config file +* ``--duration 120`` : specifies how long should be traffic running in each iteration +* ``--json results.json`` : collected data are stored in this file after run is finished + + +Multichain +---------- + +NFVbench allows to run multiple chains at the same time. 
For example it is possible to run PVP service chain N-times, +where N can be as much as your compute power can scale. With N = 10, NFVbench will spawn 10 VMs as a part of 10 simultaneous PVP chains. + +Number of chains is specified by ``--service-chain-count`` or ``-scc`` flag, default value is 1. +For example to run NFVbench with 3 PVP chains use command: + +.. code-block:: bash + + nfvbench -c nfvbench.cfg --rate 10000pps -scc 3 + +It is not necessary to specify service chain because PVP is set as default. PVP service chains will have 3 VMs in 3 chains with this configuration. +If ``-sc PVVP`` is specified instead, there would be 6 VMs in 3 chains as this service chain has 2 VMs per chain. +Both **single run** or **NDR/PDR** can be run as multichain. Running multichain is a scenario closer to a real life situation than just simple run. + + +External Chain +-------------- + +NFVbench can measure the performance of 1 or more L3 service chains that are setup externally. Instead of being setup by NFVbench, +the complete environment (VMs and networks) has to be setup prior to running NFVbench. + +Each external chain is made of 1 or more VNFs and has exactly 2 end network interfaces (left and right network interfaces) that are connected to 2 neutron networks (left and right networks). +The internal composition of a multi-VNF service chain can be arbitrary (usually linear) as far as NFVbench is concerned, +the only requirement is that the service chain can route L3 packets properly between the left and right networks. + +To run NFVbench on such external service chains: + +- explicitly tell NFVbench to use external service chain by adding ``-sc EXT`` or ``--service-chain EXT`` to NFVbench CLI options +- specify the number of external chains using the ``-scc`` option (defaults to 1 chain) +- specify the 2 end point networks of your environment in ``external_networks`` inside the config file. 
+ - The two networks specified there have to exist in Neutron and will be used as the end point networks by NFVbench ('napa' and 'marin' in the diagram below) +- specify the router gateway IPs for the external service chains (1.1.0.2 and 2.2.0.2) +- specify the traffic generator gateway IPs for the external service chains (1.1.0.102 and 2.2.0.102 in diagram below) +- specify the packet source and destination IPs for the virtual devices that are simulated (10.0.0.0/8 and 20.0.0.0/8) + + +.. image:: images/extchain-config.svg + +The L3 router function must be enabled in the VNF and configured to: + +- reply to ARP requests to its public IP addresses on both left and right networks +- route packets from each set of remote devices toward the appropriate dest gateway IP in the traffic generator using 2 static routes (as illustrated in the diagram) + +Upon start, NFVbench will: +- first retrieve the properties of the left and right networks using Neutron APIs, +- extract the underlying network ID (either VLAN ID or VNI if VxLAN is used), +- then program the TOR to stitch the 2 interfaces from the traffic generator into each end of the service chain, +- then generate and measure traffic. + +Note that in the case of multiple chains, all chains end interfaces must be connected to the same two left and right networks. +The traffic will be load balanced across the corresponding gateway IP of these external service chains. + +.. note:: By default, interfaces configuration (TOR, VTS, etc.) will be run by NFVbench but these can be skipped by using ``--no-int-config`` flag. + + +Multiflow +--------- + +NFVbench always generates L3 packets from the traffic generator but allows the user to specify how many flows to generate. +A flow is identified by a unique src/dest MAC IP and port tuple that is sent by the traffic generator. 
Note that from a vswitch point of view, the +number of flows seen will be higher as it will be at least 4 times the number of flows sent by the traffic generator +(add reverse direction of vswitch to traffic generator, add flow to VM and flow from VM). + + +The number of flows will be spread roughly even between chains when more than 1 chain is being tested. +For example, for 11 flows and 3 chains, number of flows that will run for each chain will be 3, 4, and 4 flows respectively. + +The number of flows is specified by ``--flow-count`` or ``-fc`` flag, the default value is 2 (1 flow in each direction). +To run NFVbench with 3 chains and 100 flows, use the following command: + +.. code-block:: bash + + nfvbench -c nfvbench.cfg --rate 10000pps -scc 3 -fc 100 + + +IP addresses generated can be controlled with the following NFVbench configuration options: + +.. code-block:: bash + + ip_addrs: ['10.0.0.0/8', '20.0.0.0/8'] + ip_addrs_step: 0.0.0.1 + tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100'] + tg_gateway_ip_addrs_step: 0.0.0.1 + gateway_ip_addrs: ['1.1.0.2', '2.2.0.2'] + gateway_ip_addrs_step: 0.0.0.1 + +``ip_addrs`` are the start of the 2 ip address ranges used by the traffic generators as the packets source and destination packets +where each range is associated to virtual devices simulated behind 1 physical interface of the traffic generator. +These can also be written in CIDR notation to represent the subnet. + +``tg_gateway_ip_addrs`` are the traffic generator gateway (virtual) ip addresses, all traffic to/from the virtual devices go through them. + +``gateway_ip_addrs`` are the 2 gateway ip address ranges of the VMs used in the external chains. They are only used with external chains and must correspond to their public IP address. + +The corresponding ``step`` is used for ranging the IP addresses from the `ip_addrs``, ``tg_gateway_ip_addrs`` and ``gateway_ip_addrs`` base addresses. +0.0.0.1 is the default step for all IP ranges. 
In ``ip_addrs``, 'random' can be configured which tells NFVBench to generate random src/dst IP pairs in the traffic stream. + + +Traffic Config via CLI +---------------------- + +While traffic configuration can modified using the config file, it became a hassle to have to change the config file everytime you need to change traffic config. + +Traffic config can be overridden with the CLI options. + +Here is an example of configuring traffic via CLI: + +.. code-block:: bash + + nfvbench --rate 10kpps --service-chain-count 2 -fs 64 -fs IMIX -fs 1518 --unidir + +This command will run NFVbench with two streams with unidirectional flow for three packet sizes 64B, IMIX, and 1518B. + +Used parameters: + +* ``--rate 10kpps`` : defines rate of packets sent by traffic generator (total TX rate) +* ``-scc 2`` or ``--service-chain-count 2`` : specifies number of parallel chains of given flow to run (default to 1) +* ``-fs 64`` or ``--frame-size 64``: add the specified frame size to the list of frame sizes to run +* ``--unidir`` : run traffic with unidirectional flow (default to bidirectional flow) + + +MAC Addresses +------------- + +NFVbench will dicover the MAC addresses to use for generated frames using: +- either OpenStack discovery (find the MAC of an existing VM) if the loopback VM is configured to run L2 forwarding +- or using dynamic ARP discovery (find MAC from IP) if the loopback VM is configured to run L3 routing or in the case of external chains. + diff --git a/docs/testing/user/userguide/conf.py b/docs/testing/user/userguide/conf.py new file mode 100644 index 0000000..638764c --- /dev/null +++ b/docs/testing/user/userguide/conf.py @@ -0,0 +1,344 @@ +# -*- coding: utf-8 -*- +# +# NFVBench documentation build configuration file, created by +# sphinx-quickstart on Thu Sep 29 14:25:18 2016. +# +# This file is execfile()d with the current directory set to its +# containing dir. 
+# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) +import os +import sys +from pbr import version as pbr_ver + +sys.path.insert(0, os.path.abspath('../..')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +# +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'NFVBench' +copyright = u'2016 Cisco Systems, Inc.' +author = u'Cisco Systems, Inc.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = pbr_ver.VersionInfo(project).version_string() +# The full version, including alpha/beta/rc tags. +release = pbr_ver.VersionInfo(project).version_string_with_vcs() + +# The language for content autogenerated by Sphinx. 
Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# +# today = '' +# +# Else, today_fmt is used as the format for a strftime call. +# +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +#html_theme = 'haiku' +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. 
For a list of options available for each theme, see the +# documentation. +# +html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. +# " v documentation" by default. +# +# html_title = u'NFVBench vdev117' + +# A shorter title for the navigation bar. Default is the same as html_title. +# +html_short_title = 'nfvbench' + +# The name of an image file (relative to this directory) to place at the topß +# of the sidebar. +# +# html_logo = None + +# The name of an image file (relative to this directory) to use as a favicon of +# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# +# html_extra_path = [] + +# If not None, a 'Last updated on:' timestamp is inserted at every page +# bottom, using the given strftime format. +# The empty string is equivalent to '%b %d, %Y'. +# +# html_last_updated_fmt = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# +html_use_smartypants = False + +# Custom sidebar templates, maps document names to template names. +# +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# +# html_additional_pages = {} + +# If false, no module index is generated. +# +# html_domain_indices = True + +# If false, no index is generated. 
+# +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' +# +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# 'ja' uses this config value. +# 'zh' user can custom change `jieba` dictionary path. +# +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'NFVBenchdoc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. 
List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'NFVBench.tex', u'NFVBench Documentation', + u'Alec Hothan, Stefano Suryanto, Jan Balaz', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# +# latex_use_parts = False + +# If true, show page references after internal links. +# +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# +# latex_appendices = [] + +# It false, will not define \strong, \code, itleref, \crossref ... but only +# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added +# packages. +# +# latex_keep_old_macro_names = True + +# If false, no module index is generated. +# +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'nfvbench', u'NFVBench Documentation', + [author], 1) +] + +# If true, show URL addresses after external links. +# +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'NFVBench', u'NFVBench Documentation', + author, 'NFVBench', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +# +# texinfo_appendices = [] + +# If false, no module index is generated. 
+# +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# +# texinfo_no_detailmenu = False diff --git a/docs/testing/user/userguide/examples.rst b/docs/testing/user/userguide/examples.rst new file mode 100644 index 0000000..4fc68b7 --- /dev/null +++ b/docs/testing/user/userguide/examples.rst @@ -0,0 +1,9 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. SPDX-License-Identifier: CC-BY-4.0 +.. (c) Cisco Systems, Inc + +Example of Results +****************** + + + diff --git a/docs/testing/user/userguide/faq.rst b/docs/testing/user/userguide/faq.rst new file mode 100644 index 0000000..cb5acb5 --- /dev/null +++ b/docs/testing/user/userguide/faq.rst @@ -0,0 +1,28 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. SPDX-License-Identifier: CC-BY-4.0 +.. (c) Cisco Systems, Inc + +Frequently Asked Questions +************************** + + +Can NFVbench be used with a different traffic generator than TRex? +------------------------------------------------------------------ +This is possible but requires developing a new python class to manage the new traffic generator interface. + +Can I connect Trex directly to my compute node? +----------------------------------------------- +That is possible but you will not be able to run more advanced use cases such as PVVP inter-node which requires 2 compute nodes. + + +Can I drive NFVbench using a REST interface? +-------------------------------------------- +NFVbench can run in server mode and accept HTTP or WebSocket/SocketIO events to run any type of measurement (fixed rate run or NDR_PDR run) +with any run configuration. + + +Can I run NFVbench on a Cisco UCS-B series blade? 
+------------------------------------------------- +Yes provided your UCS-B series server has a Cisco VIC 1340 (with a recent firmware version). +TRex will require VIC firmware version 3.1(2) or higher for blade servers (which supports more filtering capabilities). +In this setting, the 2 physical interfaces for data plane traffic are simply hooked to the UCS-B fabric interconnect (no need to connect to a switch). diff --git a/docs/testing/user/userguide/hw_requirements.rst b/docs/testing/user/userguide/hw_requirements.rst new file mode 100644 index 0000000..acb4c0a --- /dev/null +++ b/docs/testing/user/userguide/hw_requirements.rst @@ -0,0 +1,79 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. SPDX-License-Identifier: CC-BY-4.0 +.. (c) Cisco Systems, Inc + +Requirements for running NFVbench +================================= + +.. _requirements: + +Hardware Requirements +--------------------- +To run NFVbench you need the following hardware: +- a Linux server +- a DPDK compatible NIC with at least 2 ports (preferably 10Gbps or higher) +- 2 ethernet cables between the NIC and the OpenStack pod under test (usually through a top of rack switch) + +The DPDK-compliant NIC must be one supported by the TRex traffic generator (such as Intel X710, refer to the `Trex Installation Guide `_ for a complete list of supported NIC) + +To run the TRex traffic generator (that is bundled with NFVbench) you will need to wire 2 physical interfaces of the NIC to the TOR switch(es): + - if you have only 1 TOR, wire both interfaces to that same TOR + - 1 interface to each TOR if you have 2 TORs and want to use bonded links to your compute nodes + +.. 
image:: images/nfvbench-trex-setup.svg + + +Switch Configuration +-------------------- +For VLAN encapsulation, the 2 corresponding ports on the switch(es) facing the Trex ports on the Linux server should be configured in trunk mode (NFVbench will instruct TRex to insert the appropriate vlan tag). + +For VxLAN encapsulation, the switch(es) must support the VTEP feature (VxLAN Tunnel End Point) with the ability to attach an interface to a VTEP (this is an advanced feature that requires an NFVbench plugin for the switch). + +Using a TOR switch is more representative of a real deployment and allows to measure packet flows on any compute node in the rack without rewiring and includes the overhead of the TOR switch. + +Although not the primary targeted use case, NFVbench could also support the direct wiring of the traffic generator to +a compute node without a switch (although that will limit some of the features that invove multiple compute nodes in the packet path). + +Software Requirements +--------------------- + +You need Docker to be installed on the Linux server. + +TRex uses the DPDK interface to interact with the DPDK compatible NIC for sending and receiving frames. The Linux server will +need to be configured properly to enable DPDK. + +DPDK requires a uio (User space I/O) or vfio (Virtual Function I/O) kernel module to be installed on the host to work. +There are 2 main uio kernel modules implementations (igb_uio and uio_pci_generic) and one vfio kernel module implementation. + +To check if a uio or vfio is already loaded on the host: + +.. code-block:: bash + + lsmod | grep -e igb_uio -e uio_pci_generic -e vfio + + +If missing, it is necessary to install a uio/vfio kernel module on the host server: + +- find a suitable kernel module for your host server (any uio or vfio kernel module built with the same Linux kernel version should work) +- load it using the modprobe and insmod commands + +Example of installation of the igb_uio kernel module: + +.. 
code-block:: bash + + modprobe uio + insmod ./igb_uio.ko + +Finally, the correct iommu options and huge pages to be configured on the Linux server on the boot command line: + +- enable intel_iommu and iommu pass through: "intel_iommu=on iommu=pt" +- for Trex, pre-allocate 1024 huge pages of 2MB each (for a total of 2GB): "hugepagesz=2M hugepages=1024" + +More detailed instructions can be found in the DPDK documentation (https://media.readthedocs.org/pdf/dpdk/latest/dpdk.pdf). + + +NFVbench loopback VM image Upload +--------------------------------- + +The NFVbench loopback VM image should be uploaded to OpenStack prior to running NFVbench. +The NFVbench VM qcow2 image can be rebuilt from script or can be copied from the OPNFV artifact repository [URL TBP]. diff --git a/docs/testing/user/userguide/images/extchain-config.svg b/docs/testing/user/userguide/images/extchain-config.svg new file mode 100644 index 0000000..4e3db47 --- /dev/null +++ b/docs/testing/user/userguide/images/extchain-config.svg @@ -0,0 +1,219 @@ + + + + Produced by OmniGraffle 7.3 + 2017-03-31 20:15:29 +0000 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ext chain + + pvvp-intra + + + + + + + + + + traffic generator + + + + + + + SERVICE CHAIN + + + + + + + + DC-S + W + + + + + + + + + + + L3 + VNF + + + + + gwA + + + + + + + gwB + + + + right public IP + 2.2.0.2 + + + + right virtual devices + + + + static routes: + 20.0.0.0/8 gw 2.2.0.102 + 10.0.0.0/8 gw 1.1.0.102 + + + + right virtual gateway + 2.2.0.102 + + + + + + + + + + 20.0.0.0/8 + devices + + + + + + + + + 10.0.0.0/8 + devices + + + ’marin’ + service chain left network + 1.1.0.0/24 + + + + ‘napa’ + service chain right network + 2.2.0.0/24 + + + + left public IP + 1.1.0.2 + + + + left virtual gateway + 1.1.0.102 + + + + left virtual devices + + + + nfvbenc + h confi + g fi + le: + + + internal_network_name: [‘marin', ‘napa'] + traffic_generator: 
+ ip_addrs: ['10.0.0.0/8', '20.0.0.0/8'] + ip_addrs_step: 0.0.0.1 + tg_gateway_ip_addrs: ['1.1.0.102’, '2.2.0.102’] + tg_gateway_ip_addrs_step: 0.0.0.1 + gateway_ip_addrs: ['1.1.0.2', '2.2.0.2'] + gateway_ip_addrs_step: 0.0.0.1 + + + + + + + + diff --git a/docs/testing/user/userguide/images/nfvbench-npvp.svg b/docs/testing/user/userguide/images/nfvbench-npvp.svg new file mode 100644 index 0000000..f72af34 --- /dev/null +++ b/docs/testing/user/userguide/images/nfvbench-npvp.svg @@ -0,0 +1,107 @@ + + + + Produced by OmniGraffle 7.2 + 2016-11-13 19:58:51 +0000 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + nfvbench-flows 2 + + pvvp-intra + + N single VNF c + hains + (N x PVP + , N=2) + + + + nxpvp + + + + + + + + + + DC-S + W + + + + + + + Com + put + e node + + + + + + vswitc + h + + + + + VNF1 + + + + + VNF2 + + + + + NIC + + + + + + + + tr + affi + c + gener + at + or + + + + + diff --git a/docs/testing/user/userguide/images/nfvbench-pvp.svg b/docs/testing/user/userguide/images/nfvbench-pvp.svg new file mode 100644 index 0000000..e023b1f --- /dev/null +++ b/docs/testing/user/userguide/images/nfvbench-pvp.svg @@ -0,0 +1,94 @@ + + + + Produced by OmniGraffle 7.2 + 2016-11-13 19:58:51 +0000 + + + + + + + + + + + + + + + + + + + + + + + + + + nfvbench-flows 2 + + pvvp-intra + + single VNF c + hain (PVP) + + + + pvp + + + + + + + + + + Com + put + e node + + + + + + NIC + + + + + + DC-S + W + + + + + + vswitc + h + + + + + VNF + + + + + + + tr + affi + c + gener + at + or + + + + + diff --git a/docs/testing/user/userguide/images/nfvbench-pvvp-inter.svg b/docs/testing/user/userguide/images/nfvbench-pvvp-inter.svg new file mode 100644 index 0000000..3371346 --- /dev/null +++ b/docs/testing/user/userguide/images/nfvbench-pvvp-inter.svg @@ -0,0 +1,132 @@ + + + + Produced by OmniGraffle 7.2 + 2016-11-13 19:58:51 +0000 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + nfvbench-flows 2 + + pvvp-intra + + + + + + 2- + VNF c + hain + (int + er + -node 
PVVP) + + + + + + Com + put + e node A + + + + + + + Com + put + e node B + + + + + pvv-inter + + + + + + + + NIC + + + + + + DC-S + W + + + + + + vswitc + h + + + + + VNF1a + + + + + VNF1b + + + + + NIC + + + + + vswitc + h + + + + + + + + tr + affi + c + gener + at + or + + + + + diff --git a/docs/testing/user/userguide/images/nfvbench-pvvp-intra.svg b/docs/testing/user/userguide/images/nfvbench-pvvp-intra.svg new file mode 100644 index 0000000..6c454b2 --- /dev/null +++ b/docs/testing/user/userguide/images/nfvbench-pvvp-intra.svg @@ -0,0 +1,114 @@ + + + + Produced by OmniGraffle 7.2 + 2016-11-13 19:58:51 +0000 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + nfvbench-flows 2 + + pvvp-intra + + + + + + + + Com + put + e node + + + + + + NIC + + + + + + DC-S + W + + + + + + vswitc + h + + + + + VNF1a + + + + + VNF1b + + + 2- + VNF c + hain (PVVP) + + + + + + + tr + affi + c + gener + at + or + + + + + pvp + + + + diff --git a/docs/testing/user/userguide/images/nfvbench-spirent-setup.svg b/docs/testing/user/userguide/images/nfvbench-spirent-setup.svg new file mode 100644 index 0000000..e149fc0 --- /dev/null +++ b/docs/testing/user/userguide/images/nfvbench-spirent-setup.svg @@ -0,0 +1,170 @@ + + + + Produced by OmniGraffle 7.2.1 + 2016-12-03 22:53:19 +0000 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + nfvbench-spirent-setup + + Layer 1 + + + + Mercury Build Node + + + + + + + + + nfvbench + + + + + Compute node 1 + + + + + N9K TOR + + + + + T-rex + + + + + + Spirent + + + + + + Control Node + + + + + Storage + + + + + + Control Node + + + + + Control Node + + + + + Storage + + + + + Storage + + + + + Compute node i + + + + + N9K TOR + + + + + + + + + + Compute node N + + + + vPC + + + + 1 + + + + 2 + + + pull spirent test center + and nfvbench containers + + + + connect spirent to TOR + with 2 x 10G cables + + + + nfvbench/Spirent setup + + + + + Spirent + Test + Center + + + + + diff --git 
a/docs/testing/user/userguide/images/nfvbench-trex-setup.svg b/docs/testing/user/userguide/images/nfvbench-trex-setup.svg new file mode 100644 index 0000000..3f68006 --- /dev/null +++ b/docs/testing/user/userguide/images/nfvbench-trex-setup.svg @@ -0,0 +1,170 @@ + + + + Produced by OmniGraffle 7.2.2 + 2017-03-17 17:57:44 +0000 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + nfvbench-trex-setup + + Layer 1 + + + + Mercury Build Node + + + + + + + nfvbench + + + + + Compute node 1 + + + + + N9K TOR + + + + + T-rex + + + + + + Intel + X710 + NIC + + + + + + Control Node + + + + + Storage + + + + + + Control Node + + + + + Control Node + + + + + Storage + + + + + Storage + + + + + Compute node i + + + + + N9K TOR + + + + + + + + + + Compute node N + + + + + vPC + + + + 1 + + + + 2 + + + + 3 + + + add Intel X710 NIC + + + + pull nfvbench container + + + + add a 10G cable + to each TOR + + + + nfvbench/T-rex setup + + + + diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst new file mode 100644 index 0000000..a7eb1e9 --- /dev/null +++ b/docs/testing/user/userguide/index.rst @@ -0,0 +1,30 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. SPDX-License-Identifier: CC-BY-4.0 +.. (c) Cisco Systems, Inc + + +.. NFVBench documentation master file, created by + sphinx-quickstart on Thu Sep 29 14:25:18 2016. + + +NFVbench: A Network Performance Benchmarking Tool for NFVi Stacks +***************************************************************** + +The NFVbench tool provides an automated way to measure the network performance for the most common data plane packet flows on any OpenStack system. +It is designed to be easy to install and easy to use by non experts (no need to be an expert in traffic generators and data plane performance testing). + + +Table of Content +---------------- +.. 
toctree:: + :maxdepth: 2 + + readme + installation + examples + advanced + server + faq + + + diff --git a/docs/testing/user/userguide/installation.rst b/docs/testing/user/userguide/installation.rst new file mode 100644 index 0000000..8a0511a --- /dev/null +++ b/docs/testing/user/userguide/installation.rst @@ -0,0 +1,14 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. SPDX-License-Identifier: CC-BY-4.0 +.. (c) Cisco Systems, Inc + +=================================== +Installation and Quick Start Guides +=================================== + +.. toctree:: + :maxdepth: 2 + + hw_requirements + quickstart_docker + diff --git a/docs/testing/user/userguide/quickstart_docker.rst b/docs/testing/user/userguide/quickstart_docker.rst new file mode 100644 index 0000000..2c9f762 --- /dev/null +++ b/docs/testing/user/userguide/quickstart_docker.rst @@ -0,0 +1,224 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. SPDX-License-Identifier: CC-BY-4.0 +.. (c) Cisco Systems, Inc + +=========================================== +NFVbench Installation and Quick Start Guide +=========================================== + +.. _docker_installation: + +Make sure you satisfy the `hardware and software requirements ` before you start . + + +1. Container installation +------------------------- + +To pull the latest NFVbench container image: + +.. code-block:: bash + + docker pull opnfv/nfvbench/nfvbench + +2. Docker Container configuration +--------------------------------- + +The NFVbench container requires the following Docker options to operate properly. 
+ ++------------------------------------------------------+------------------------------------------------------+ +| Docker options | Description | ++======================================================+======================================================+ +| -v /lib/modules/$(uname -r):/lib/modules/$(uname -r) | needed by kernel modules in the container | ++------------------------------------------------------+------------------------------------------------------+ +| -v /dev:/dev | needed by kernel modules in the container | ++------------------------------------------------------+------------------------------------------------------+ +| -v $PWD:/tmp/nfvbench | optional but recommended to pass files between the | +| | host and the docker space (see examples below) | +| | Here we map the current directory on the host to the | +| | /tmp/nfvbench director in the container but any | +| | other similar mapping can work as well | ++------------------------------------------------------+------------------------------------------------------+ +| --net=host | (optional) needed if you run the NFVbench REST | +| | server in the container (or use any appropriate | +| | docker network mode other than "host") | ++------------------------------------------------------+------------------------------------------------------+ +| --privilege | (optional) required if SELinux is enabled on the host| ++------------------------------------------------------+------------------------------------------------------+ + +It can be convenient to write a shell script (or an alias) to automatically insert the necessary options. + +3. 
Start the Docker container +----------------------------- +As for any Docker container, you can execute NFVbench measurement sessions using a temporary container ("docker run" - which exits after each NFVbench run) +or you can decide to run the NFVbench container in the background then execute one or more NFVbench measurement sessions on that container ("docker exec"). + +The former approach is simpler to manage (since each container is started and terminated after each command) but incurs a small delay at start time (several seconds). +The second approach is more responsive as the delay is only incurred once when starting the container. + +We will take the second approach and start the NFVbench container in detached mode with the name "nfvbench" (this works with bash, prefix with "sudo" if you do not use the root login) + +.. code-block:: bash + + docker run --detach --net=host --privileged -v $PWD:/tmp/nfvbench -v /dev:/dev -v /lib/modules/$(uname -r):/lib/modules/$(uname -r) --name nfvbench opnfv/nfvbench tail -f /dev/null + +The tail command simply prevents the container from exiting. + +Then create an alias to make it easy to execute nfvbench commands directly from the host shell prompt: + +.. code-block:: bash + + alias nfvbench='docker exec -it nfvbench nfvbench' + +The next to last "nfvbench" refers to the name of the container while the last "nfvbench" refers to the NFVbench binary that is available to run in the container. + +To verify it is working: + +.. code-block:: bash + + nfvbench --version + nfvbench --help + + +4. NFVbench configuration +------------------------- + +Create a new file containing the minimal configuration for NFVbench, we can call it any name, for example "my_nfvbench.cfg" and paste the following yaml template in the file: + +.. 
code-block:: bash + + openrc_file: + traffic_generator: + generator_profile: + - name: trex-local + tool: TRex + ip: 127.0.0.1 + cores: 3 + interfaces: + - port: 0 + switch_port: + pci: + - port: 1 + switch_port: + pci: + intf_speed: 10Gbps + +NFVbench requires an ``openrc`` file to connect to OpenStack using the OpenStack API. This file can be downloaded from the OpenStack Horizon dashboard (refer to the OpenStack documentation on how to +retrieve the openrc file). The file pathname in the container must be stored in the "openrc_file" property. If it is stored on the host in the current directory, its full pathname must start with /tmp/nfvbench (since the current directory is mapped to /tmp/nfvbench in the container). + +The required configuration is the PCI address of the 2 physical interfaces that will be used by the traffic generator. The PCI address can be obtained for example by using the "lspci" Linux command. For example: + +.. code-block:: bash + + [root@sjc04-pod6-build ~]# lspci | grep 710 + 0a:00.0 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01) + 0a:00.1 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01) + 0a:00.2 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01) + 0a:00.3 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 01) + + +Example of edited configuration with an OpenStack RC file stored in the current directory with the "openrc" name, and +PCI addresses "0a:00.0" and "0a:00.1" (first 2 ports of the quad port NIC): + +.. 
code-block:: bash + + openrc_file: /tmp/nfvbench/openrc + traffic_generator: + generator_profile: + - name: trex-local + tool: TRex + ip: 127.0.0.1 + cores: 3 + interfaces: + - port: 0 + switch_port: + pci: 0a:00.0 + - port: 1 + switch_port: + pci: 0a:00.1 + intf_speed: 10Gbps + +Alternatively, the full template with comments can be obtained using the --show-default-config option in yaml format: + +.. code-block:: bash + + nfvbench --show-default-config > my_nfvbench.cfg + +Edit the nfvbench.cfg file to only keep those properties that need to be modified (preserving the nesting) + + +5. Upload the NFVbench loopback VM image to OpenStack +----------------------------------------------------- +[TBP URL to NFVbench VM image in the OPNFV artifact repository] + + +6. Run NFVbench +--------------- + +To do a single run at 5000pps bi-directional using the PVP packet path: + +.. code-block:: bash + + nfvbench -c /tmp/nfvbench/my_nfvbench.cfg --rate 5kpps + +NFVbench options used: + +* ``-c /tmp/nfvbench/my_nfvbench.cfg`` : specify the config file to use (this must reflect the file path from inside the container) +* ``--rate 5kpps`` : specify rate of packets for test using the kpps unit (thousands of packets per second) + +This should produce a result similar to this (a simple run with the above options should take less than 5 minutes): + +.. 
code-block:: none + + ========== nfvbench Summary ========== + Date: 2016-10-05 21:43:30 + nfvbench version 0.0.1.dev128 + Mercury version: 5002 + Benchmarks: + > Networks: + > N9K version: {'10.28.108.249': {'BIOS': '07.34', 'NXOS': '7.0(3)I2(2b)'}, '10.28.108.248': {'BIOS': '07.34', 'NXOS': '7.0(3)I2(2b)'}} + Traffic generator profile: trex-c45 + Traffic generator tool: TRex + Traffic generator API version: {u'build_date': u'Aug 24 2016', u'version': u'v2.08', u'built_by': u'hhaim', u'build_time': u'16:32:13'} + Flows: + > PVP: + VPP version: {u'sjc04-pod3-compute-6': 'v16.06-rc1~27-gd175728'} + > Bidirectional: False + Profile: traffic_profile_64B + + +-----------------+-------------+----------------------+----------------------+----------------------+ + | L2 Frame Size | Drop Rate | Avg Latency (usec) | Min Latency (usec) | Max Latency (usec) | + +=================+=============+======================+======================+======================+ + | 64 | 0.0000% | 22.1885 | 10 | 503 | + +-----------------+-------------+----------------------+----------------------+----------------------+ + + + > L2 frame size: 64 + Flow analysis duration: 70.0843 seconds + + Run Config: + + +-------------+------------------+--------------+-----------+ + | Direction | Duration (sec) | Rate | Rate | + +=============+==================+==============+===========+ + | Forward | 60 | 1.0080 Mbps | 1,500 pps | + +-------------+------------------+--------------+-----------+ + | Reverse | 60 | 672.0000 bps | 1 pps | + +-------------+------------------+--------------+-----------+ + + +----------------------+----------+-----------------+---------------+---------------+-----------------+---------------+---------------+ + | Interface | Device | Packets (fwd) | Drops (fwd) | Drop% (fwd) | Packets (rev) | Drops (rev) | Drop% (rev) | + +======================+==========+=================+===============+===============+=================+===============+===============+ + | traffic-generator 
| trex | 90,063 | | | 61 | 0 | - | + +----------------------+----------+-----------------+---------------+---------------+-----------------+---------------+---------------+ + | traffic-generator | trex | 90,063 | 0 | - | 61 | | | + +----------------------+----------+-----------------+---------------+---------------+-----------------+---------------+---------------+ + +7. Terminating the NFVbench container +------------------------------------- +When no longer needed, the container can be terminated using the usual docker commands: + +.. code-block:: bash + + docker kill nfvbench + docker rm nfvbench + diff --git a/docs/testing/user/userguide/readme.rst b/docs/testing/user/userguide/readme.rst new file mode 100644 index 0000000..17ce889 --- /dev/null +++ b/docs/testing/user/userguide/readme.rst @@ -0,0 +1,163 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. SPDX-License-Identifier: CC-BY-4.0 +.. (c) Cisco Systems, Inc + +Features +******** + +Data Plane Performance Measurement Features +------------------------------------------- + +NFVbench supports the following main measurement capabilities: + +- supports 2 measurement modes: + - *fixed rate* mode to generate traffic at a fixed rate for a fixed duration + - NDR (No Drop Rate) and PDR (Partial Drop Rate) measurement mode +- configurable frame sizes (any list of fixed sizes or 'IMIX') +- built-in packet paths +- built-in loopback VNFs based on fast L2 or L3 forwarders running in VMs +- configurable number of flows and service chains +- configurable traffic direction (single or bi-directional) + + +NDR is the highest throughput achieved without dropping packets. +PDR is the highest throughput achieved without dropping more than a pre-set limit (called PDR threshold or allowance, expressed in %). 
+ +Results of each run include the following data: + +- Aggregated achieved throughput in bps +- Aggregated achieved packet rate in pps (or fps) +- Actual drop rate in % +- Latency in usec (min, max, average in the current version) + +Built-in OpenStack support +-------------------------- +NFVbench can stage OpenStack resources to build 1 or more service chains using direct OpenStack APIs. Each service chain is composed of: + +- 1 or 2 loopback VM instances per service chain +- 2 Neutron networks per loopback VM + +OpenStack resources are staged before traffic is measured using OpenStack APIs (Nova and Neutron) then disposed after completion of measurements. + +The loopback VM flavor to use can be configured in the NFVbench configuration file. + +Note that NFVbench does not use OpenStack Heat nor any higher level service (VNFM or NFVO) to create the service chains because its +main purpose is to measure the performance of the NFVi infrastructure which is mainly focused on L2 forwarding performance. + +External Chains +--------------- +NFVbench also supports settings that involve externally staged packet paths with or without OpenStack: + +- run benchmarks on existing service chains at the L3 level that are staged externally by any other tool (e.g. any VNF capable of L3 routing) +- run benchmarks on existing L2 chains that are configured externally (e.g. pure L2 forwarder such as DPDK testpmd) + + +Traffic Generation +------------------ + +NFVbench currently integrates with the open source TRex traffic generator: + +- `TRex `_ (pre-built into the NFVbench container) + + +Supported Packet Paths +---------------------- +Packet paths describe where packets are flowing in the NFVi platform. The most commonly used paths are identified by 3 or 4 letter abbreviations. +A packet path can generally describe the flow of packets associated to one or more service chains, with each service chain composed of 1 or more VNFs. 
+ +The following packet paths are currently supported by NFVbench: + +- PVP (Physical interface to VM to Physical interface) +- PVVP (Physical interface to VM to VM to Physical interface) +- N*PVP (N concurrent PVP packet paths) +- N*PVVP (N concurrent PVVP packet paths) + +The traffic is made of 1 or more flows of L3 frames (UDP packets) with different payload sizes. Each flow is identified by a unique source and destination MAC/IP tuple. + + +Loopback VM +^^^^^^^^^^^ + +NFVbench provides a loopback VM image that runs CentOS with 2 pre-installed forwarders: + +- DPDK testpmd configured to do L2 cross connect between 2 virtual interfaces +- FD.io VPP configured to perform L3 routing between 2 virtual interfaces + +Frames are just forwarded from one interface to the other. +In the case of testpmd, the source and destination MAC are rewritten, which corresponds to the mac forwarding mode (--forward-mode=mac). +In the case of VPP, VPP will act as a real L3 router, and the packets are routed from one port to the other using static routes. + +Which forwarder and what Nova flavor to use can be selected in the NFVbench configuration. Be default the DPDK testpmd forwarder is used with 2 vCPU per VM. +The configuration of these forwarders (such as MAC rewrite configuration or static route configuration) is managed by NFVbench. + + +PVP Packet Path +^^^^^^^^^^^^^^^ + +This packet path represents a single service chain with 1 loopback VNF and 2 Neutron networks: + +.. image:: images/nfvbench-pvp.svg + + +PVVP Packet Path +^^^^^^^^^^^^^^^^ + +This packet path represents a single service chain with 2 loopback VNFs in sequence and 3 Neutron networks. +The 2 VNFs can run on the same compute node (PVVP intra-node): + +.. image:: images/nfvbench-pvvp-intra.svg + +or on different compute nodes (PVVP inter-node) based on a configuration option: + +.. 
image:: images/nfvbench-pvvp-inter.svg + + + +Multi-Chaining (N*PVP or N*PVVP) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Multiple service chains can be setup by NFVbench without any limit on the concurrency (other than limits imposed by available resources on compute nodes). +In the case of multiple service chains, NFVbench will instruct the traffic generator to use multiple L3 packet streams (frames directed to each path will have a unique destination MAC address). + +Example of multi-chaining with 2 concurrent PVP service chains: + +.. image:: images/nfvbench-npvp.svg + +This innovative feature will allow to measure easily the performance of a fully loaded compute node running multiple service chains. + +Multi-chaining is currently limited to 1 compute node (PVP or PVVP intra-node) or 2 compute nodes (for PVVP inter-node). +The 2 edge interfaces for all service chains will share the same 2 networks. + + +Other Misc Packet Paths +^^^^^^^^^^^^^^^^^^^^^^^ + +P2P (Physical interface to Physical interface - no VM) can be supported using the external chain/L2 forwarding mode. + +V2V (VM to VM) is not supported but PVVP provides a more complete (and mroe realistic) alternative. + + +Supported Neutron Network Plugins and vswitches +----------------------------------------------- + +Any Virtual Switch, Any Encapsulation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +NFVbench is agnostic of the virtual switch implementation and has been tested with the following virtual switches: + +- ML2/VPP/VLAN (networking-vpp) +- OVS/VLAN and OVS-DPDK/VLAN +- ML2/ODL/VPP (OPNFV Fast Data Stack) + +SR-IOV +^^^^^^ + +By default, service chains will be based on virtual switch interfaces. + +NFVbench provides an option to select SR-IOV based virtual interfaces instead (thus bypassing any virtual switch) for those OpenStack system that include and support SR-IOV capable NICs on compute nodes. 
+ + + + + + diff --git a/docs/testing/user/userguide/server.rst b/docs/testing/user/userguide/server.rst new file mode 100644 index 0000000..4495e19 --- /dev/null +++ b/docs/testing/user/userguide/server.rst @@ -0,0 +1,445 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. SPDX-License-Identifier: CC-BY-4.0 +.. (c) Cisco Systems, Inc + +NFVbench Server mode and NFVbench client API +============================================ + +NFVbench can run as an HTTP server to: + +- optionally provide access to any arbitrary HTML files (HTTP server function) - this is optional +- service fully parameterized asynchronous run requests using the HTTP protocol (REST/json with polling) +- service fully parameterized run requests with interval stats reporting using the WebSocket/SocketIO protocol. + +Start the NFVbench server +------------------------- +To run in server mode, simply use the --server and optionally the listen address to use (--host , default is 0.0.0.0) and listening port to use (--port , default is 7555). + + +If HTTP files are to be serviced, they must be stored right under the http root path. +This root path must contain a static folder to hold static files (css, js) and a templates folder with at least an index.html file to hold the template of the index.html file to be used. +This mode is convenient when you do not already have a WEB server hosting the UI front end. +If HTTP files servicing is not needed (REST only or WebSocket/SocketIO mode), the root path can point to any dummy folder. + +Once started, the NFVbench server will be ready to service HTTP or WebSocket/SocketIO requests at the advertised URL. + +Example of NFVbench server start in a container: + +.. 
code-block:: bash + + # get to the container shell (assume the container name is "nfvbench") + docker exec -it nfvbench bash + # from the container shell start the NFVbench server in the background + nfvbench -c /tmp/nfvbench/nfvbench.cfg --server /tmp & + # exit container + exit + + + +HTTP Interface +-------------- + +/echo (GET) +^^^^^^^^^^^^^^^^^^^^^ + +This request simply returns whatever content is sent in the body of the request (only used for testing) + +/start_run (POST) +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This request will initiate a new NFVbench run asynchronously and can optionally pass the NFVbench configuration to run in the body (in JSON format). +See "NFVbench configuration JSON parameter" below for details on how to format this parameter. + +The request returns immediately with a json content indicating if there was an error (status=ERROR) or if the request was submitted successfully (status=PENDING). Example of return when the submission is successful: + +.. code-block:: bash + + { + "error_message": "nfvbench run still pending", + "status": "PENDING" + } + +/status (GET) +^^^^^^^^^^^^^^^^^^^^^^^ + +This request fetches the status of an asynchronous run. It will return in json format: + +- a request pending reply (if the run is still not completed) +- an error reply if there is no run pending +- or the complete result of the run + +The client can keep polling until the run completes. + +Example of return when the run is still pending: + +.. code-block:: bash + + { + "error_message": "nfvbench run still pending", + "status": "PENDING" + } + +Example of return when the run completes: + +.. code-block:: bash + + { + "result": {...}, + "status": "OK" + } + + + +WebSocket/SocketIO events +------------------------- + +List of SocketIO events supported: + +Client to Server +^^^^^^^^^^^^^^^^ + +start_run: + + sent by client to start a new run with the configuration passed in argument (JSON). 
+ The configuration can be any valid NFVbench configuration passed as a JSON document (see "NFVbench configuration JSON parameter" below) + +Server to Client +^^^^^^^^^^^^^^^^ + +run_interval_stats: + + sent by server to report statistics during a run + the message contains the statistics {'time_ms': time_ms, 'tx_pps': tx_pps, 'rx_pps': rx_pps, 'drop_pct': drop_pct} + +ndr_found: + + (during NDR-PDR search) + sent by server when the NDR rate is found + the message contains the NDR value {'rate_pps': ndr_pps} + +pdr_found: + + (during NDR-PDR search) + sent by server when the PDR rate is found + the message contains the PDR value {'rate_pps': pdr_pps} + + +run_end: + + sent by server to report the end of a run + the message contains the complete results in JSON format + +NFVbench configuration JSON parameter +------------------------------------- +The NFVbench configuration describes the parameters of an NFVbench run and can be passed to the NFVbench server as a JSON document. + +Default configuration +^^^^^^^^^^^^^^^^^^^^^ + +The simplest JSON document is the empty dictionary "{}" which indicates to use the default NFVbench configuration: + +- PVP +- NDR-PDR measurement +- 64 byte packets +- 1 flow per direction + +The entire default configuration can be viewed using the --show-json-config option on the cli: + +.. 
code-block:: bash + + # nfvbench --show-json-config + { + "availability_zone": null, + "compute_node_user": "root", + "compute_nodes": null, + "debug": false, + "duration_sec": 60, + "flavor": { + "disk": 0, + "extra_specs": { + "hw:cpu_policy": "dedicated", + "hw:mem_page_size": 2048 + }, + "ram": 8192, + "vcpus": 2 + }, + "flavor_type": "nfv.medium", + "flow_count": 1, + "generic_poll_sec": 2, + "generic_retry_count": 100, + "image_name": "nfvbenchvm", + "inter_node": false, + "internal_networks": { + "left": { + "name": "nfvbench-net0", + "subnet": "nfvbench-subnet0", + "cidr": "192.168.1.0/24" + }, + "right": { + "name": "nfvbench-net1", + "subnet": "nfvbench-subnet1", + "cidr": "192.168.2.0/24" + }, + "middle": { + "name": "nfvbench-net2", + "subnet": "nfvbench-subnet2", + "cidr": "192.168.3.0/24" + } + }, + "interval_sec": 10, + "json": null, + "loop_vm_name": "nfvbench-loop-vm", + "measurement": { + "NDR": 0.001, + "PDR": 0.1, + "load_epsilon": 0.1 + }, + "name": "(built-in default config)", + "no_cleanup": false, + "no_int_config": false, + "no_reset": false, + "no_tor_access": false, + "no_traffic": false, + "no_vswitch_access": false, + "openrc_file": "/tmp/nfvbench/openstack/openrc", + "openstack_defaults": "/tmp/nfvbench/openstack/defaults.yaml", + "openstack_setup": "/tmp/nfvbench/openstack/setup_data.yaml", + "rate": "ndr_pdr", + "service_chain": "PVP", + "service_chain_count": 1, + "sriov": false, + "std_json": null, + "tor": { + "switches": [ + { + "host": "172.26.233.12", + "password": "lab", + "port": 22, + "username": "admin" + } + ], + "type": "N9K" + }, + "traffic": { + "bidirectional": true, + "profile": "traffic_profile_64B" + }, + "traffic_generator": { + "default_profile": "trex-local", + "gateway_ip_addrs": [ + "1.1.0.2", + "2.2.0.2" + ], + "gateway_ip_addrs_step": "0.0.0.1", + "generator_profile": [ + { + "cores": 3, + "interfaces": [ + { + "pci": "0a:00.0", + "port": 0, + "switch_port": "Ethernet1/33", + "vlan": null + }, + { + "pci": 
"0a:00.1", + "port": 1, + "switch_port": "Ethernet1/34", + "vlan": null + } + ], + "intf_speed": "10Gbps", + "ip": "127.0.0.1", + "name": "trex-local", + "tool": "TRex" + } + ], + "host_name": "nfvbench_tg", + "ip_addrs": [ + "10.0.0.0/8", + "20.0.0.0/8" + ], + "ip_addrs_step": "0.0.0.1", + "mac_addrs": [ + "00:10:94:00:0A:00", + "00:11:94:00:0A:00" + ], + "step_mac": null, + "tg_gateway_ip_addrs": [ + "1.1.0.100", + "2.2.0.100" + ], + "tg_gateway_ip_addrs_step": "0.0.0.1" + }, + "traffic_profile": [ + { + "l2frame_size": [ + "64" + ], + "name": "traffic_profile_64B" + }, + { + "l2frame_size": [ + "IMIX" + ], + "name": "traffic_profile_IMIX" + }, + { + "l2frame_size": [ + "1518" + ], + "name": "traffic_profile_1518B" + }, + { + "l2frame_size": [ + "64", + "IMIX", + "1518" + ], + "name": "traffic_profile_3sizes" + } + ], + "unidir_reverse_traffic_pps": 1, + "vlan_tagging": true, + "vm_image_file": "file://172.29.172.152/downloads/nfvbench/nfvbenchvm-latest.qcow2", + "vts_ncs": { + "host": null, + "password": "secret", + "port": 22, + "username": "admin" + } + } + + +Common examples of JSON configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Use the default configuration but use 10000 flows per direction (instead of 1): + +.. code-block:: bash + + { "flow_count": 10000 } + + +Use default configuration but with 10000 flows, "EXT" chain and IMIX packet size: + +.. code-block:: bash + + { + "flow_count": 10000, + "service_chain": "EXT", + "traffic": { + "profile": "traffic_profile_IMIX" + } + } + +A short run of 5 seconds at a fixed rate of 1Mpps (and everything else same as the default configuration): + +.. code-block:: bash + + { + "duration_sec": 5, + "rate": "1Mpps" + } + +Example of interaction with the NFVbench server using HTTP and curl +------------------------------------------------------------------- +HTTP requests can be sent directly to the NFVbench server from CLI using curl from any host that can connect to the server (here we run it from the local host). 
+ +This is a POST request to start a run using the default NFVbench configuration but with traffic generation disabled ("no_traffic" property is set to true): + +.. code-block:: bash + + [root@sjc04-pod3-mgmt ~]# curl -H "Accept: application/json" -H "Content-type: application/json" -X POST -d '{"no_traffic":true}' http://127.0.0.1:7555/start_run + { + "error_message": "nfvbench run still pending", + "status": "PENDING" + } + [root@sjc04-pod3-mgmt ~]# + +This request will return immediately with status set to "PENDING" if the request was started successfully. + +The status can be polled until the run completes. Here the poll returns a "PENDING" status, indicating the run is still not completed: + +.. code-block:: bash + + [root@sjc04-pod3-mgmt ~]# curl -G http://127.0.0.1:7555/status + { + "error_message": "nfvbench run still pending", + "status": "PENDING" + } + [root@sjc04-pod3-mgmt ~]# + +Finally, the status request returns an "OK" status along with the full results (truncated here): + +.. code-block:: bash + + [root@sjc04-pod3-mgmt ~]# curl -G http://127.0.0.1:7555/status + { + "result": { + "benchmarks": { + "network": { + "service_chain": { + "PVP": { + "result": { + "bidirectional": true, + "compute_nodes": { + "nova:sjc04-pod3-compute-4": { + "bios_settings": { + "Adjacent Cache Line Prefetcher": "Disabled", + "All Onboard LOM Ports": "Enabled", + "All PCIe Slots OptionROM": "Enabled", + "Altitude": "300 M", + ... + + "date": "2017-03-31 22:15:41", + "nfvbench_version": "0.3.5", + "openstack_spec": { + "encaps": "VxLAN", + "vswitch": "VTS" + } + }, + "status": "OK" + } + [root@sjc04-pod3-mgmt ~]# + + + +Example of interaction with the NFVbench server using a python CLI app (nfvbench_client) +---------------------------------------------------------------------------------------- +The module client/client.py contains an example of a python class that can be used to control the NFVbench server from a python app using HTTP or WebSocket/SocketIO. 
+ +The module client/nfvbench_client.py has a simple main application to control the NFVbench server from CLI. +The "nfvbench_client" wrapper script can be used to invoke the client front end (this wrapper is pre-installed in the NFVbench container) + +Example of invocation of the nfvbench_client front end, from the host (assume the name of the NFVbench container is "nfvbench"), +use the default NFVbench configuration but do not generate traffic (no_traffic property set to true, the full json result is truncated here): + +.. code-block:: bash + + [root@sjc04-pod3-mgmt ~]# docker exec -it nfvbench nfvbench_client -c '{"no_traffic":true}' http://127.0.0.1:7555 + {u'status': u'PENDING', u'error_message': u'nfvbench run still pending'} + {u'status': u'PENDING', u'error_message': u'nfvbench run still pending'} + {u'status': u'PENDING', u'error_message': u'nfvbench run still pending'} + + {u'status': u'OK', u'result': {u'date': u'2017-03-31 22:04:59', u'nfvbench_version': u'0.3.5', + u'config': {u'compute_nodes': None, u'compute_node_user': u'root', u'vts_ncs': {u'username': u'admin', u'host': None, u'password': u'secret', u'port': 22}, u'traffic_generator': {u'tg_gateway_ip_addrs': [u'1.1.0.100', u'2.2.0.100'], u'ip_addrs_step': u'0.0.0.1', u'step_mac': None, u'generator_profile': [{u'intf_speed': u'10Gbps', u'interfaces': [{u'pci': u'0a:00.0', u'port': 0, u'vlan': 1998, u'switch_port': None}, + + ... + + [root@sjc04-pod3-mgmt ~]# + +The http interface is used unless --use-socketio is defined. + +Example of invocation using WebSocket/SocketIO, execute NFVbench using the default configuration but with a duration of 5 seconds and a fixed rate run of 5kpps. + +.. code-block:: bash + + [root@sjc04-pod3-mgmt ~]# docker exec -it nfvbench nfvbench_client -c '{"duration":5,"rate":"5kpps"}' --use-socketio http://127.0.0.1:7555 >results.json + + + + + + + -- cgit 1.2.3-korg