-rw-r--r--  docs/development/overview/kvmfornfv_overview.rst | 2
-rw-r--r--  docs/development/requirements/kvmfornfv_requirements.rst | 2
-rw-r--r--  docs/index.rst | 30
-rw-r--r--  docs/release/configguide/abstract.rst | 2
-rw-r--r--  docs/release/configguide/index.rst | 4
-rw-r--r--  docs/release/configguide/scenariomatrix.rst | 50
-rw-r--r--  docs/release/installation/abstract.rst | 2
-rw-r--r--  docs/release/installation/kvm4nfv-cicd.release.notes.rst | 36
-rw-r--r--  docs/release/release-notes/release-notes.rst | 155
-rw-r--r--  docs/release/scenarios/abstract.rst | 42
-rw-r--r--  docs/release/scenarios/index.rst | 24
-rw-r--r--  docs/release/scenarios/kvmfornfv.scenarios.description.rst | 423
-rw-r--r--  docs/release/scenarios/os-nosdn-kvm-ha/os-nosdn-kvm-ha.description.rst | 10
-rwxr-xr-x  docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-ha/index.rst (renamed from docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-ha/index.rst) | 6
-rw-r--r--  docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-ha/os-nosdn-kvm_ovs_dpdk-ha.description.rst (renamed from docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-ha/os-nosdn-kvm_nfv_ovs_dpdk-ha.description.rst) | 132
-rwxr-xr-x  docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-noha/index.rst (renamed from docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-noha/index.rst) | 6
-rw-r--r--  docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-noha/os-nosdn-kvm_ovs_dpdk-noha.description.rst (renamed from docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-noha/os-nosdn-kvm_nfv_ovs_dpdk-noha.description.rst) | 133
-rwxr-xr-x  docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/index.rst (renamed from docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha/index.rst) | 6
-rw-r--r--  docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/os-nosdn-kvm_ovs_dpdk_bar-ha.description.rst (renamed from docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha.description.rst) | 20
-rwxr-xr-x  docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-noha/index.rst (renamed from docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha/index.rst) | 6
-rw-r--r--  docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-noha/os-nosdn-kvm_ovs_dpdk_bar-noha.description.rst (renamed from docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha.description.rst) | 26
-rw-r--r--  docs/release/userguide/Ftrace.debugging.tool.userguide.rst | 68
-rw-r--r--  docs/release/userguide/common.platform.render.rst | 2
-rw-r--r--  docs/release/userguide/feature.userguide.render.rst | 2
-rw-r--r--  docs/release/userguide/kvmfornfv.cyclictest-dashboard.userguide.rst | 104
-rw-r--r--  docs/release/userguide/kvmfornfv_glossary.rst | 20
-rw-r--r--  docs/release/userguide/live_migration.userguide.rst | 93
-rw-r--r--  docs/release/userguide/low_latency.userguide.rst | 44
-rw-r--r--  docs/release/userguide/openstack.rst | 14
-rw-r--r--  docs/release/userguide/packet_forwarding.userguide.rst | 309
-rwxr-xr-x  tests/vsperf.conf | 9
-rwxr-xr-x  tests/vsperf.conf.sriov | 7
32 files changed, 1442 insertions(+), 347 deletions(-)
diff --git a/docs/development/overview/kvmfornfv_overview.rst b/docs/development/overview/kvmfornfv_overview.rst
index 7c3005a04..01afcf460 100644
--- a/docs/development/overview/kvmfornfv_overview.rst
+++ b/docs/development/overview/kvmfornfv_overview.rst
@@ -38,7 +38,7 @@ The detailed understanding of this project is organized into different sections-
its execution.
* **configuration guide** - This provides guidance for configuring KVM4NFV
environment, even with the use of specific installer tools for deploying some
- components, available in the Danube release of OPNFV.
+ components, available in the Euphrates release of OPNFV.
* **scenarios** - This includes the scenarios that are currently implemented in the
  kvm4nfv project, features of each scenario and a general guide on how to deploy them.
* **userguide** - This provides the required technical assistance to the user, in
diff --git a/docs/development/requirements/kvmfornfv_requirements.rst b/docs/development/requirements/kvmfornfv_requirements.rst
index 6a879d2a1..5b49414b8 100644
--- a/docs/development/requirements/kvmfornfv_requirements.rst
+++ b/docs/development/requirements/kvmfornfv_requirements.rst
@@ -13,7 +13,7 @@ Infrastructure(NFVI).The existing hypervisors, however, are not necessarily
designed or targeted to meet the requirements for the NFVI.
This document specifies the list of requirements that need to be met as part
-of this "NFV Hypervisors-KVM" project in Danube release.
+of this "NFV Hypervisors-KVM" project in the Euphrates release.
As part of this project we need to make collaborative efforts towards enabling
the NFV features.
diff --git a/docs/index.rst b/docs/index.rst
index b597d04eb..3baf3c44a 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -3,9 +3,9 @@
.. _kvmfornfv-documentation:
-******************
-Danube 1.0 Release
-******************
+*********************
+Euphrates 1.0 Release
+*********************
*************************
Overview of Documentation
@@ -80,48 +80,48 @@ KVM4NFV Scenarios Overview and Description
./release/scenarios/kvmfornfv.scenarios.description.rst
*******************************************************
-os-nosdn-kvm_nfv_ovs_dpdk-noha Overview and Description
+os-nosdn-kvm_ovs_dpdk-noha Overview and Description
*******************************************************
.. toctree::
- :caption: os-nosdn-kvm_nfv_ovs_dpdk-noha
+ :caption: os-nosdn-kvm_ovs_dpdk-noha
:numbered:
:maxdepth: 3
- ./release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-noha/os-nosdn-kvm_nfv_ovs_dpdk-noha.description.rst
+ ./release/scenarios/os-nosdn-kvm_ovs_dpdk-noha/os-nosdn-kvm_ovs_dpdk-noha.description.rst
*****************************************************
-os-nosdn-kvm_nfv_ovs_dpdk-ha Overview and Description
+os-nosdn-kvm_ovs_dpdk-ha Overview and Description
*****************************************************
.. toctree::
- :caption: os-nosdn-kvm_nfv_ovs_dpdk-ha
+ :caption: os-nosdn-kvm_ovs_dpdk-ha
:numbered:
:maxdepth: 3
- ./release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-ha/os-nosdn-kvm_nfv_ovs_dpdk-ha.description.rst
+ ./release/scenarios/os-nosdn-kvm_ovs_dpdk-ha/os-nosdn-kvm_ovs_dpdk-ha.description.rst
***********************************************************
-os-nosdn-kvm_nfv_ovs_dpdk_bar-noha Overview and Description
+os-nosdn-kvm_ovs_dpdk_bar-noha Overview and Description
***********************************************************
.. toctree::
- :caption: os-nosdn-kvm_nfv_ovs_dpdk_bar-noha
+ :caption: os-nosdn-kvm_ovs_dpdk_bar-noha
:numbered:
:maxdepth: 3
- ./release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha.description.rst
+ ./release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-noha/os-nosdn-kvm_ovs_dpdk_bar-noha.description.rst
*********************************************************
-os-nosdn-kvm_nfv_ovs_dpdk_bar-ha Overview and Description
+os-nosdn-kvm_ovs_dpdk_bar-ha Overview and Description
*********************************************************
.. toctree::
- :caption: os-nosdn-kvm_nfv_ovs_dpdk_bar-ha
+ :caption: os-nosdn-kvm_ovs_dpdk_bar-ha
:numbered:
:maxdepth: 3
- ./release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha.description.rst
+ ./release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/os-nosdn-kvm_ovs_dpdk_bar-ha.description.rst
******************
KVM4NFV User Guide
diff --git a/docs/release/configguide/abstract.rst b/docs/release/configguide/abstract.rst
index 3693bcab7..e531c9b95 100644
--- a/docs/release/configguide/abstract.rst
+++ b/docs/release/configguide/abstract.rst
@@ -6,7 +6,7 @@ Configuration Abstract
======================
This document provides guidance for the configurations available in the
-Danube release of OPNFV
+Euphrates release of OPNFV
The release includes four installer tools leveraging different technologies;
Apex, Compass4nfv, Fuel and JOID, which deploy components of the platform.
diff --git a/docs/release/configguide/index.rst b/docs/release/configguide/index.rst
index 6aa2786fb..735a28a79 100644
--- a/docs/release/configguide/index.rst
+++ b/docs/release/configguide/index.rst
@@ -7,8 +7,8 @@
Kvm4nfv Configuration Guide
***************************
-Danube 1.0
-------------
+Euphrates 1.0
+-------------
.. toctree::
:maxdepth: 2
diff --git a/docs/release/configguide/scenariomatrix.rst b/docs/release/configguide/scenariomatrix.rst
index 3da38ed60..0b967779a 100644
--- a/docs/release/configguide/scenariomatrix.rst
+++ b/docs/release/configguide/scenariomatrix.rst
@@ -12,11 +12,11 @@ scenarios. While our target is to establish parity across the installation tools
can provide all scenarios, the practical challenge of achieving that goal for any given feature and
release results in some disparity.
-Danube scenario overeview
+Euphrates scenario overview
^^^^^^^^^^^^^^^^^^^^^^^^^
The following table provides an overview of the installation tools and available scenarios
-in the Danube release of OPNFV.
+in the Euphrates release of OPNFV.
Scenario status is indicated by a weather pattern icon. All scenarios listed with
a weather pattern are possible to deploy and run in your environment or a Pharos lab,
@@ -37,15 +37,16 @@ Weather pattern icon legend:
+---------------------------------------------+----------------------------------------------------------+
Scenarios that are not yet in a state of "Stable, no known issues" will continue to be stabilised
-and updates will be made on the stable/danube branch. While we intend that all Danube
+and updates will be made on the stable/euphrates branch. While we intend that all Euphrates
scenarios should be stable it is worth checking regularly to see the current status. Due to
-our dependency on upstream communities and code some issues may not be resolved prior to the D release.
+our dependency on upstream communities and code, some issues may not be resolved prior to the E release.
Scenario Naming
^^^^^^^^^^^^^^^
In OPNFV, scenarios are identified by short scenario names; these names follow a scheme that
-identifies the key components and behaviours of the scenario. The rules for scenario naming are as follows:
+identifies the key components and behaviours of the scenario. The rules for scenario naming are as
+follows:
.. code:: bash
@@ -76,7 +77,8 @@ Details of the fields are
* **[option]:** optional
* Used for the scenarios that do not fit into the naming scheme.
- * The optional field in the short scenario name should not be included if there is no optional scenario.
+ * The optional field in the short scenario name should not be included if there is no optional
+ scenario.
Some examples of supported scenario names are:
@@ -92,38 +94,44 @@ Some examples of supported scenario names are:
* This is an OpenStack deployment using OpenDaylight and OVS enabled with SFC features
- * **os-nosdn-kvm_nfv_ovs_dpdk-ha**
+ * **os-nosdn-kvm_ovs_dpdk-ha**
- * This is an Openstack deployment with high availability using OVS, DPDK including the OPNFV enhanced KVM hypervisor
+ * This is an OpenStack deployment with high availability using OVS and DPDK, including the OPNFV
+ enhanced KVM hypervisor
* This deployment has ``3-Controller and 2-Compute nodes``
- * **os-nosdn-kvm_nfv_ovs_dpdk-noha**
+ * **os-nosdn-kvm_ovs_dpdk-noha**
- * This is an Openstack deployment without high availability using OVS, DPDK including the OPNFV enhanced KVM hypervisor
+ * This is an OpenStack deployment without high availability using OVS and DPDK, including the OPNFV
+ enhanced KVM hypervisor
* This deployment has ``1-Controller and 3-Compute nodes``
- * **os-nosdn-kvm_nfv_ovs_dpdk_bar-ha**
+ * **os-nosdn-kvm_ovs_dpdk_bar-ha**
- * This is an Openstack deployment with high availability using OVS, DPDK including the OPNFV enhanced KVM hypervisor
+ * This is an OpenStack deployment with high availability using OVS and DPDK, including the OPNFV
+ enhanced KVM hypervisor
and Barometer
* This deployment has ``3-Controller and 2-Compute nodes``
- * **os-nosdn-kvm_nfv_ovs_dpdk_bar-noha**
+ * **os-nosdn-kvm_ovs_dpdk_bar-noha**
- * This is an Openstack deployment without high availability using OVS, DPDK including the OPNFV enhanced KVM hypervisor
+ * This is an OpenStack deployment without high availability using OVS and DPDK, including the OPNFV
+ enhanced KVM hypervisor
and Barometer
* This deployment has ``1-Controller and 3-Compute nodes``
Installing your scenario
^^^^^^^^^^^^^^^^^^^^^^^^
-There are two main methods of deploying your target scenario, one method is to follow this guide which will
-walk you through the process of deploying to your hardware using scripts or ISO images, the other method is
-to set up a Jenkins slave and connect your infrastructure to the OPNFV Jenkins master.
+There are two main methods of deploying your target scenario: one is to follow this guide,
+which will walk you through the process of deploying to your hardware using scripts or ISO images;
+the other is to set up a Jenkins slave and connect your infrastructure to the OPNFV Jenkins
+master.
-For the purposes of evaluation and development a number of Danube scenarios are able to be deployed
-virtually to mitigate the requirements on physical infrastructure. Details and instructions on performing
-virtual deployments can be found in the installer specific installation instructions.
+For the purposes of evaluation and development, a number of Euphrates scenarios can be
+deployed virtually to mitigate the requirements on physical infrastructure. Details and instructions
+on performing virtual deployments can be found in the installer-specific installation instructions.
-To set up a Jenkins slave for automated deployment to your lab, refer to the `Jenkins slave connect guide.
+To set up a Jenkins slave for automated deployment to your lab, refer to the `Jenkins slave connect
+guide.
<http://artifacts.opnfv.org/brahmaputra.1.0/docs/opnfv-jenkins-slave-connection.brahmaputra.1.0.html>`_
diff --git a/docs/release/installation/abstract.rst b/docs/release/installation/abstract.rst
index a53450eff..5c9c81d8a 100644
--- a/docs/release/installation/abstract.rst
+++ b/docs/release/installation/abstract.rst
@@ -7,5 +7,5 @@ Abstract
********
This document gives the user instructions on how to deploy the available
-KVM4NFV build scenario verfied for the Danube release of the OPNFV
+KVM4NFV build scenario verified for the Euphrates release of the OPNFV
platform.
diff --git a/docs/release/installation/kvm4nfv-cicd.release.notes.rst b/docs/release/installation/kvm4nfv-cicd.release.notes.rst
index 415182bc7..dae7bc1ca 100644
--- a/docs/release/installation/kvm4nfv-cicd.release.notes.rst
+++ b/docs/release/installation/kvm4nfv-cicd.release.notes.rst
@@ -10,7 +10,8 @@ Release Note for KVM4NFV CICD
Abstract
--------
-This document contains the release notes for the Danube release of OPNFV when using KVM4NFV CICD process.
+This document contains the release notes for the Euphrates release of OPNFV when using KVM4NFV CICD
+process.
Introduction
------------
@@ -33,7 +34,7 @@ Release Data
| **Release designation** | |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2017-03-27 |
+| **Release date** | 2017-10-06 |
| | |
+--------------------------------------+--------------------------------------+
| **Purpose of the delivery** | - Automate the KVM4VFV CICD scenario |
@@ -64,19 +65,34 @@ Feature additions
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-34 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-72 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-57 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-73 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-58 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-78 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-59 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-86 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-60 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-87 |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: | NFV Hypervisors-KVMFORNFV-88 |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: | NFV Hypervisors-KVMFORNFV-89 |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: | VSPERF-510 |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: | YARDSTICK-783 |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: | YARDSTICK-815 |
| | |
+--------------------------------------+--------------------------------------+
@@ -89,7 +105,7 @@ Known issues
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-75 |
+| | |
+--------------------------------------+--------------------------------------+
Workarounds
@@ -99,5 +115,5 @@ See JIRA: https://jira.opnfv.org/projects
References
==========
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index e08341ffb..908e8d499 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -10,7 +10,7 @@ Release Notes
Abstract
---------
-This document provides the release notes for Danube 1.0 release of KVM4NFV.
+This document provides the release notes for Euphrates 1.0 release of KVM4NFV.
**Contents**
@@ -28,26 +28,29 @@ This document provides the release notes for Danube 1.0 release of KVM4NFV.
Version history
---------------
-+--------------------+--------------------+--------------------+----------------------+
-| **Date** | **Ver.** | **Author** | **Comment** |
-| | | | |
-+--------------------+--------------------+--------------------+----------------------+
-|2016-08-22 | 0.1.0 | | Colorado 1.0 release |
-| | | | |
-+--------------------+--------------------+--------------------+----------------------+
-|2017-03-27 | 0.1.0 | | Danube 1.0 release |
-| | | | |
-+--------------------+--------------------+--------------------+----------------------+
++--------------------+--------------------+--------------------+------------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++--------------------+--------------------+--------------------+------------------------+
+|2016-08-22 | 0.1.0 | | Colorado 1.0 release |
+| | | | |
++--------------------+--------------------+--------------------+------------------------+
+|2017-03-27 | 0.1.0 | | Danube 1.0 release |
+| | | | |
++--------------------+--------------------+--------------------+------------------------+
+|2017-10-06 | 0.1.0 | | Euphrates 1.0 release |
+| | | | |
++--------------------+--------------------+--------------------+------------------------+
Important notes
---------------
-The KVM4NFV project is currently supported on the Fuel installer.
+The KVM4NFV project is currently supported on the Fuel and Apex installers.
Summary
-------
-This Danube 1.0 release provides *KVM4NFV* as a framework to enhance the
+This Euphrates 1.0 release provides *KVM4NFV* as a framework to enhance the
KVM Hypervisor for NFV and OPNFV scenario testing, automated in the OPNFV
CI pipeline, including:
@@ -57,9 +60,8 @@ CI pipeline, including:
* Cyclictests execution to check the latency
-* “os-nosdn-kvm-ha”,“os-nosdn-kvm_nfv_ovs_dpdk-ha”,“os-nosdn-kvm_nfv_ovs_dpdk-noha”,“os-nosdn-kvm_nfv_ovs_dpdk_bar-ha”,
- “os-nosdn-kvm_nfv_ovs_dpdk_bar-noha” Scenarios testing for ``high availability/no-high avaliability``
- configuration using Fuel installer
+* “os-nosdn-kvm_ovs_dpdk-ha” and “os-nosdn-kvm_ovs_dpdk-noha” scenario testing for
+  ``high availability/no-high availability`` configuration using the Apex installer
* Documentation created for,
@@ -88,13 +90,13 @@ Release Data
| **Repo/commit-ID** | kvmfornfv |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Danube |
+| **Release designation** | Euphrates |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2017-03-27 |
+| **Release date** | 2017-10-06 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Danube 1.0 Releases |
+| **Purpose of the delivery** | OPNFV Euphrates 1.0 Releases |
| | |
+--------------------------------------+--------------------------------------+
@@ -104,20 +106,22 @@ Version change
1 Module version changes
~~~~~~~~~~~~~~~~~~~~~~~~~~
-This is the Danube 1.0 main release. It is based on following upstream
+This is the Euphrates 1.0 main release. It is based on the following upstream
versions:
* RT Kernel 4.4.50-rt62
-* QEMU 2.6
+* QEMU 2.9.0
-* Fuel plugin based on Fuel 10.0
+* Apex based on Openstack Ocata
-This is the second tracked release of KVM4NFV
+
+This is the third tracked release of KVM4NFV.
2 Document version changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
This is the second version of the KVM4NFV framework in OPNFV.
Reason for version
@@ -130,28 +134,34 @@ Reason for version
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-57 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-72 |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: | NFV Hypervisors-KVMFORNFV-73 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-58 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-78 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-59 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-86 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-61 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-87 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-62 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-88 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-63 |
+| JIRA: | NFV Hypervisors-KVMFORNFV-89 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-64 |
+| JIRA: | VSPERF-510 |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: | NFV Hypervisors-KVMFORNFV-65 |
+| JIRA: | YARDSTICK-783 |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: | YARDSTICK-815 |
| | |
+--------------------------------------+--------------------------------------+
@@ -161,29 +171,37 @@ A brief ``Description of the the JIRA tickets``:
| **JIRA REFERENCE** | **DESCRIPTION** |
| | |
+---------------------------------------+-------------------------------------------------------------+
-| KVMFORNFV-57 | CI/CD Integration into Yardstick |
+| KVMFORNFV-72 | Define and integrate additional scenario - KVM+OVS+DPDK |
+| | with HA for bare metal and virtual environments |
++---------------------------------------+-------------------------------------------------------------+
+| KVMFORNFV-73 | Define and integrate additional scenario - KVM+OVS+DPDK |
+| | with NOHA for bare metal and virtual environments |
| | |
+---------------------------------------+-------------------------------------------------------------+
-| KVMFORNFV-58 | Complete the integration of test plan into Yardstick |
-| | and Jenkins infrastructure to include latency testing |
+| KVMFORNFV-78 | Scenarios in Euphrates release for KVM for NFV |
| | |
+---------------------------------------+-------------------------------------------------------------+
-| KVMFORNFV-59 | Enable capability to publish results on Yardstick Dashboard |
+| KVMFORNFV-86 | Live Migration tests in kvmfornfv repository |
| | |
+---------------------------------------+-------------------------------------------------------------+
-| KVMFORNFV-61 | Define and integrate additional scenario - KVM+OVS+DPDK |
-| | with HA and NOHA for baremetal and virtual environments |
+| KVMFORNFV-87 | Packet forwarding test type pxp - multiple guests |
| | |
+---------------------------------------+-------------------------------------------------------------+
-| KVMFORNFV-62 | Define and integrate additional scenario - KVM+OVS+DPDK+BAR |
-| | with HA and NOHA for bare metal and virtual environments |
+| KVMFORNFV-88 | Apex environment setup for local machine to debug Apex |
+| | related integration issues |
| | |
+---------------------------------------+-------------------------------------------------------------+
-| KVMFORNFV-63 | Setup Local fuel environment |
+| KVMFORNFV-89 | Generate kernel debug-info rpm |
| | |
+---------------------------------------+-------------------------------------------------------------+
-| KVMFORNFV-64 | Fuel environment setup for local machine to debug Fuel |
-| | related integration issues |
+| VSPERF-510 | KVM optimizations |
+| | |
++---------------------------------------+-------------------------------------------------------------+
+| YARDSTICK-783 | To update Grafana dashboard for kvmfornfv packet forwarding |
+| | test cases |
++---------------------------------------+-------------------------------------------------------------+
+| YARDSTICK-815 | Implementation of breaktrace option for cyclictest |
+| | |
+---------------------------------------+-------------------------------------------------------------+
Deliverables
@@ -191,34 +209,20 @@ Deliverables
1 Software deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~
-* Danube 1.0 release of the KVM4NFV RPM and debian for kvm4nfv
+* Euphrates 1.0 release of the KVM4NFV RPM and debian for kvm4nfv
-* Added the following scenarios as part of D-Release:
+* Kernel debug-info rpm and debian packages are generated as part of E-release
- * os-nosdn-kvm_nfv_ovs_dpdk-noha
+* Integrated the following scenarios in APEX as part of E-Release:
- * os-nosdn-kvm_nfv_ovs_dpdk_bar-noha
+ * os-nosdn-kvm_ovs_dpdk-noha
- * os-nosdn-kvm_nfv_ovs_dpdk-ha
-
- * os-nosdn-kvm_nfv_ovs_dpdk_bar-ha
+ * os-nosdn-kvm_ovs_dpdk-ha
* Configured influxdb and `Graphana_dashboard`_ for publishing kvm4nfv test results
.. _Graphana_dashboard: http://testresults.opnfv.org/grafana/dashboard/db/kvmfornfv-cyclictest
-* Cyclictest test case is successfully implemented, it has the below test types.,
-
- * idle-idle
-
- * CPUstress-idle
-
- * IOstress-idle
-
- * Memorystress-idle
-
-* Implemented Noisy Neighbour feature ., cyclictest under stress testing is implemented
-
* Packet forwarding test case is implemented and it supports the following test types currently,
* Packet forwarding to Host
@@ -227,11 +231,20 @@ Deliverables
* Packet forwarding to Guest using SRIOV
-* Ftrace debugging tool is supported in D-Release. The logs collected by ftrace are stored in artifacts for future needs
+ * Packet forwarding to multiple guests
+
+* Breaktrace option is implemented to monitor the latency values obtained by the cyclictest
+
+* Live Migration test case is implemented and the following values are collected:
-* PCM Utility is part of D-Release. The future scope may include collection of read/write data and publishing in grafana
+ * Total time
-* Either Apex or Fuel can be used for deployment of os-nosdn-kvm-ha scenario
+ * Down time
+
+ * Setup time
+
+* Either Apex or Fuel can be used for deployment of os-nosdn-kvm-ha, os-nosdn-kvm_ovs_dpdk-ha and
+  os-nosdn-kvm_ovs_dpdk-noha scenarios
+------------------------------------------+------------------+-----------------+
| **Scenario Name** | **Apex** | **Fuel** |
@@ -239,18 +252,16 @@ Deliverables
+==========================================+==================+=================+
| - os-nosdn-kvm-ha | ``Y`` | ``Y`` |
+------------------------------------------+------------------+-----------------+
-| - os-nosdn-kvm_nfv_ovs_dpdk-noha | | ``Y`` |
+| - os-nosdn-kvm_ovs_dpdk-noha | ``Y`` | ``Y`` |
+------------------------------------------+------------------+-----------------+
-| - os-nosdn-kvm_nfv_ovs_dpdk-ha | | ``Y`` |
+| - os-nosdn-kvm_ovs_dpdk-ha | ``Y`` | ``Y`` |
+------------------------------------------+------------------+-----------------+
-| - os-nosdn-kvm_nfv_ovs_dpdk_bar-noha | | ``Y`` |
+| - os-nosdn-kvm_ovs_dpdk_bar-noha | | ``Y`` |
+------------------------------------------+------------------+-----------------+
-| - os-nosdn-kvm_nfv_ovs_dpdk_bar-ha | | ``Y`` |
+| - os-nosdn-kvm_ovs_dpdk_bar-ha | | ``Y`` |
+------------------------------------------+------------------+-----------------+
-* Future scope may include adding Apex support for all the remaining scenarios
-
-* The below documents are delivered for Danube KVM4NFV Release:
+* The below documents are delivered for Euphrates KVM4NFV Release:
* User Guide
@@ -273,6 +284,6 @@ Deliverables
References
----------
-For more information on the KVM4NFV Danube release, please see:
+For more information on the KVM4NFV Euphrates release, please see:
https://wiki.opnfv.org/display/kvm/
diff --git a/docs/release/scenarios/abstract.rst b/docs/release/scenarios/abstract.rst
index dcdd62fa9..2ccc437a5 100644
--- a/docs/release/scenarios/abstract.rst
+++ b/docs/release/scenarios/abstract.rst
@@ -4,39 +4,35 @@
*****************
Scenario Abstract
*****************
-This chapter includes detailed explanation of various sceanrios files deployed as part
-of kvm4nfv D-Release.
+This chapter includes a detailed explanation of the various scenario files deployed as part
+of kvm4nfv E-Release.
Release Features
----------------
-+------------------------------------------+------------------+-----------------+
-| **Scenario Name** | **Colorado** | **Danube** |
-| | | |
-+==========================================+==================+=================+
-| - os-nosdn-kvm-ha | ``Y`` | ``Y`` |
-+------------------------------------------+------------------+-----------------+
-| - os-nosdn-kvm_nfv_ovs_dpdk-noha | | ``Y`` |
-+------------------------------------------+------------------+-----------------+
-| - os-nosdn-kvm_nfv_ovs_dpdk-ha | | ``Y`` |
-+------------------------------------------+------------------+-----------------+
-| - os-nosdn-kvm_nfv_ovs_dpdk_bar-noha | | ``Y`` |
-+------------------------------------------+------------------+-----------------+
-| - os-nosdn-kvm_nfv_ovs_dpdk_bar-ha | | ``Y`` |
-+------------------------------------------+------------------+-----------------+
++------------------------------------------+------------------+-----------------+-------------------+
+| **Scenario Name** | **Colorado** | **Danube** | **Euphrates** |
+| | | | |
++==========================================+==================+=================+===================+
+| - os-nosdn-kvm-ha | ``Y`` | ``Y`` | |
++------------------------------------------+------------------+-----------------+-------------------+
+| - os-nosdn-kvm_ovs_dpdk-noha | | ``Y`` | ``Y`` |
++------------------------------------------+------------------+-----------------+-------------------+
+| - os-nosdn-kvm_ovs_dpdk-ha | | ``Y`` | ``Y`` |
++------------------------------------------+------------------+-----------------+-------------------+
+| - os-nosdn-kvm_ovs_dpdk_bar-noha | | ``Y`` | |
++------------------------------------------+------------------+-----------------+-------------------+
+| - os-nosdn-kvm_ovs_dpdk_bar-ha | | ``Y`` | |
++------------------------------------------+------------------+-----------------+-------------------+
-D- Release Scenario's overview
+E-Release Scenarios overview
-------------------------------
+------------------------------------------+-----------------------+---------------------+------------------+----------+----------+
| **Scenario Name** | **No of Controllers** | **No of Computes** | **Plugin Names** | **DPDK** | **OVS** |
| | | | | | |
+==========================================+=======================+=====================+==================+==========+==========+
-| - ``os-nosdn-kvm_nfv_ovs_dpdk-noha`` | 1 | 3 | KVM | Y | Y |
+| - ``os-nosdn-kvm_ovs_dpdk-noha`` | 1 | 1 | KVM | Y | Y |
+------------------------------------------+-----------------------+---------------------+------------------+----------+----------+
-| - ``os-nosdn-kvm_nfv_ovs_dpdk-ha`` | 3 | 2 | KVM | Y | Y |
-+------------------------------------------+-----------------------+---------------------+------------------+----------+----------+
-| - ``os-nosdn-kvm_nfv_ovs_dpdk_bar-noha`` | 1 | 3 | KVM & BAR | Y | Y |
-+------------------------------------------+-----------------------+---------------------+------------------+----------+----------+
-| - ``os-nosdn-kvm_nfv_ovs_dpdk_bar-ha`` | 3 | 2 | KVM & BAR | Y | Y |
+| - ``os-nosdn-kvm_ovs_dpdk-ha`` | 3 | 2 | KVM | Y | Y |
+------------------------------------------+-----------------------+---------------------+------------------+----------+----------+
diff --git a/docs/release/scenarios/index.rst b/docs/release/scenarios/index.rst
index f1f93c31a..def82debd 100644
--- a/docs/release/scenarios/index.rst
+++ b/docs/release/scenarios/index.rst
@@ -16,45 +16,45 @@ Scenario Overview and Description
./kvmfornfv.scenarios.description.rst
*******************************************************
-os-nosdn-kvm_nfv_ovs_dpdk-noha Overview and Description
+os-nosdn-kvm_ovs_dpdk-noha Overview and Description
*******************************************************
.. toctree::
- :caption: os-nosdn-kvm_nfv_ovs_dpdk-noha
+ :caption: os-nosdn-kvm_ovs_dpdk-noha
:numbered:
:maxdepth: 3
- ./os-nosdn-kvm_nfv_ovs_dpdk-noha/os-nosdn-kvm_nfv_ovs_dpdk-noha.description.rst
+ ./os-nosdn-kvm_ovs_dpdk-noha/os-nosdn-kvm_ovs_dpdk-noha.description.rst
*****************************************************
-os-nosdn-kvm_nfv_ovs_dpdk-ha Overview and Description
+os-nosdn-kvm_ovs_dpdk-ha Overview and Description
*****************************************************
.. toctree::
- :caption: os-nosdn-kvm_nfv_ovs_dpdk-ha
+ :caption: os-nosdn-kvm_ovs_dpdk-ha
:numbered:
:maxdepth: 3
- ./os-nosdn-kvm_nfv_ovs_dpdk-ha/os-nosdn-kvm_nfv_ovs_dpdk-ha.description.rst
+ ./os-nosdn-kvm_ovs_dpdk-ha/os-nosdn-kvm_ovs_dpdk-ha.description.rst
***********************************************************
-os-nosdn-kvm_nfv_ovs_dpdk_bar-noha Overview and Description
+os-nosdn-kvm_ovs_dpdk_bar-noha Overview and Description
***********************************************************
.. toctree::
- :caption: os-nosdn-kvm_nfv_ovs_dpdk_bar-noha
+ :caption: os-nosdn-kvm_ovs_dpdk_bar-noha
:numbered:
:maxdepth: 3
- ./os-nosdn-kvm_nfv_ovs_dpdk_bar-noha/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha.description.rst
+ ./os-nosdn-kvm_ovs_dpdk_bar-noha/os-nosdn-kvm_ovs_dpdk_bar-noha.description.rst
*********************************************************
-os-nosdn-kvm_nfv_ovs_dpdk_bar-ha Overview and Description
+os-nosdn-kvm_ovs_dpdk_bar-ha Overview and Description
*********************************************************
.. toctree::
- :caption: os-nosdn-kvm_nfv_ovs_dpdk_bar-ha
+ :caption: os-nosdn-kvm_ovs_dpdk_bar-ha
:numbered:
:maxdepth: 3
- ./os-nosdn-kvm_nfv_ovs_dpdk_bar-ha/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha.description.rst
+ ./os-nosdn-kvm_ovs_dpdk_bar-ha/os-nosdn-kvm_ovs_dpdk_bar-ha.description.rst
diff --git a/docs/release/scenarios/kvmfornfv.scenarios.description.rst b/docs/release/scenarios/kvmfornfv.scenarios.description.rst
index 5a5328666..bc8c585e7 100644
--- a/docs/release/scenarios/kvmfornfv.scenarios.description.rst
+++ b/docs/release/scenarios/kvmfornfv.scenarios.description.rst
@@ -29,10 +29,10 @@ Version Features
+-----------------------------+---------------------------------------------+
| | - High Availability/No-High Availability |
| | deployment configuration of KVM4NFV |
-| | software suite |
-| Danube | - Multi-node setup with 3 controller and |
+| | software suite using Fuel |
+| | - Multi-node setup with 3 controller and |
| | 2 compute nodes are deployed for HA |
-| | - Multi-node setup with 1 controller and |
+| Danube | - Multi-node setup with 1 controller and |
| | 3 compute nodes are deployed for NO-HA |
| | - Scenarios os-nosdn-kvm_ovs_dpdk-ha, |
| | os-nosdn-kvm_ovs_dpdk_bar-ha, |
@@ -40,6 +40,18 @@ Version Features
| | os-nosdn-kvm_ovs_dpdk_bar-noha |
| | are supported |
+-----------------------------+---------------------------------------------+
+| | - High Availability/No-High Availability |
+| | deployment configuration of KVM4NFV |
+| | software suite using Apex |
+| | - Multi-node setup with 3 controller and |
+| Euphrates | 2 compute nodes are deployed for HA |
+| | - Multi-node setup with 1 controller and |
+| | 1 compute node are deployed for NO-HA |
+| | - Scenarios os-nosdn-kvm_ovs_dpdk-ha, |
+| | os-nosdn-kvm_ovs_dpdk-noha, |
+| | are supported |
++-----------------------------+---------------------------------------------+
+
Introduction
@@ -53,14 +65,18 @@ This OPNFV software suite includes OPNFV KVM4NFV latest software packages
for Linux Kernel and QEMU patches for achieving low latency and also OPNFV Barometer for traffic,
performance and platform monitoring.
-High Availability feature is achieved by deploying OpenStack
-multi-node setup with 1 Fuel-Master,3 controllers and 2 computes nodes.
+When using the Fuel installer, the High Availability feature is achieved by deploying an OpenStack
+multi-node setup with 1 Fuel-Master, 3 controllers and 2 compute nodes. The No-High Availability
+feature is achieved by deploying an OpenStack multi-node setup with 1 Fuel-Master, 1 controller
+and 3 compute nodes.
-No-High Availability feature is achieved by deploying OpenStack
-multi-node setup with 1 Fuel-Master,1 controllers and 3 computes nodes.
+When using the Apex installer, the High Availability feature is achieved by deploying an OpenStack
+multi-node setup with 1 undercloud, 3 overcloud controllers and 2 overcloud compute nodes.
+The No-High Availability feature is achieved by deploying an OpenStack multi-node setup with
+1 undercloud, 1 overcloud controller and 1 overcloud compute node.
KVM4NFV packages will be installed on compute nodes as part of deployment.
-The scenario testcase deploys a multi-node setup by using OPNFV Fuel deployer.
+The scenario testcase deploys a multi-node setup by using the OPNFV Fuel and Apex deployers.
System pre-requisites
---------------------
@@ -103,11 +119,26 @@ If Nested virtualization is disabled, enable it by,
Environment Setup
-----------------
+**Enable network access after the installation**
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For **CentOS**,
+log in as the "root" user. After the installation completes, the Ethernet interfaces are not enabled
+by default in CentOS 7; you need to change the line "ONBOOT=no" to "ONBOOT=yes" in the network
+interface configuration file (such as ifcfg-enp6s0f0 or ifcfg-em1 … whichever you want to connect)
+in the /etc/sysconfig/network-scripts sub-directory. The default BOOTPROTO is dhcp in the network
+interface configuration file. Then use the following command to enable network access:
+
+.. code:: bash
+
+ systemctl restart network
+
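+The ONBOOT edit itself can be scripted. A minimal sketch, assuming the interface file is
+ifcfg-enp6s0f0 (substitute the name of whichever interface you want to connect):
+
+.. code:: bash
+
+   # flip ONBOOT from "no" to "yes" for the chosen interface
+   sed -i 's/^ONBOOT=no/ONBOOT=yes/' /etc/sysconfig/network-scripts/ifcfg-enp6s0f0
+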
**Configuring Proxy**
~~~~~~~~~~~~~~~~~~~~~
For **Ubuntu**,
-Create an apt.conf file in /etc/apt if it doesn't exist. Used to set proxy for apt-get if working behind a proxy server.
+Create an apt.conf file in /etc/apt if it doesn't exist. It is used to set a proxy for apt-get when
+working behind a proxy server.
.. code:: bash
@@ -123,6 +154,124 @@ Edit /etc/yum.conf to work behind a proxy server by adding the below line.
$ echo "proxy=http://<username>:<password>@<proxy>:<port>/" >> /etc/yum.conf
+**Install redsocks**
+~~~~~~~~~~~~~~~~~~~~
+
+For **CentOS**,
+since there is no redsocks package for CentOS Linux release 7.2.1511, you need to build redsocks
+from source yourself. Use the following commands to create a “proxy_redsocks” sub-directory at /root:
+
+.. code:: bash
+
+ cd ~
+ mkdir proxy_redsocks
+
+Since you can’t download files on your CentOS system yet, use the following command on another
+CentOS or Ubuntu system to download the redsocks source for CentOS into a file “redsocks-src”:
+
+.. code:: bash
+
+ wget -O redsocks-src --no-check-certificate https://github.com/darkk/redsocks/zipball/master
+
+Also download libevent-devel-2.0.21-4.el7.x86_64.rpm by:
+
+.. code:: bash
+
+ wget ftp://fr2.rpmfind.net/linux/centos/7.2.1511/os/x86_64/Packages/libevent-devel-2.0.21-4.el7.x86_64.rpm
+
+Copy both the redsocks-src and libevent-devel-2.0.21-4.el7.x86_64.rpm files into ~/proxy_redsocks on
+your CentOS system by “scp”, for example as sketched below.
+
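+A sketch of the copy step, run from the machine that downloaded the files (the CentOS host name
+is a placeholder):
+
+.. code:: bash
+
+   # copy the redsocks source archive and the libevent-devel rpm to the target system
+   scp redsocks-src libevent-devel-2.0.21-4.el7.x86_64.rpm root@<centos-host>:~/proxy_redsocks/
+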
+Back on your CentOS system, first install libevent-devel using libevent-devel-2.0.21-4.el7.x86_64.rpm
+as below:
+
+.. code:: bash
+
+ cd ~/proxy_redsocks
+ yum install -y libevent-devel-2.0.21-4.el7.x86_64.rpm
+
+Build redsocks by:
+
+.. code:: bash
+
+ cd ~/proxy_redsocks
+ unzip redsocks-src
+ cd darkk-redsocks-78a73fc
+ yum -y install gcc
+ make
+ cp redsocks ~/proxy_redsocks/.
+
+Create a redsocks.conf in ~/proxy_redsocks with the following contents:
+
+.. code:: bash
+
+ base {
+ log_debug = on;
+ log_info = on;
+ log = "file:/root/proxy.log";
+ daemon = on;
+ redirector = iptables;
+ }
+ redsocks {
+ local_ip = 0.0.0.0;
+ local_port = 6666;
+ // socks5 proxy server
+ ip = <proxy>;
+ port = 1080;
+ type = socks5;
+ }
+ redudp {
+ local_ip = 0.0.0.0;
+ local_port = 8888;
+ ip = <proxy>;
+ port = 1080;
+ }
+ dnstc {
+ local_ip = 127.0.0.1;
+ local_port = 5300;
+ }
+
+Start the redsocks service by:
+
+.. code:: bash
+
+ cd ~/proxy_redsocks
+ ./redsocks -c redsocks.conf
+
+*Note*
+The redsocks service is not persistent and you need to execute the above-mentioned commands after
+every reboot.
+
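+If you prefer not to restart redsocks by hand after each reboot, one possible way to make it
+persistent is a small systemd unit (a sketch only; the unit name and paths below are assumptions,
+not part of redsocks):
+
+.. code:: bash
+
+   # redsocks.conf sets daemon = on, so the process forks after start-up
+   cat > /etc/systemd/system/redsocks.service <<EOF
+   [Unit]
+   Description=redsocks transparent proxy
+   After=network.target
+
+   [Service]
+   Type=forking
+   ExecStart=/root/proxy_redsocks/redsocks -c /root/proxy_redsocks/redsocks.conf
+
+   [Install]
+   WantedBy=multi-user.target
+   EOF
+   systemctl daemon-reload
+   systemctl enable redsocks
+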
+Create intc-proxy.sh in ~/proxy_redsocks with the following contents and make it executable by
+“chmod +x intc-proxy.sh”:
+
+.. code:: bash
+
+ iptables -t nat -N REDSOCKS
+ iptables -t nat -A REDSOCKS -d 0.0.0.0/8 -j RETURN
+ iptables -t nat -A REDSOCKS -d 10.0.0.0/8 -j RETURN
+ iptables -t nat -A REDSOCKS -d 127.0.0.0/8 -j RETURN
+ iptables -t nat -A REDSOCKS -d 169.254.0.0/16 -j RETURN
+ iptables -t nat -A REDSOCKS -d 172.16.0.0/12 -j RETURN
+ iptables -t nat -A REDSOCKS -d 192.168.0.0/16 -j RETURN
+ iptables -t nat -A REDSOCKS -d 224.0.0.0/4 -j RETURN
+ iptables -t nat -A REDSOCKS -d 240.0.0.0/4 -j RETURN
+ iptables -t nat -A REDSOCKS -p tcp -j REDIRECT --to-ports 6666
+ iptables -t nat -A REDSOCKS -p udp -j REDIRECT --to-ports 8888
+ iptables -t nat -A OUTPUT -p tcp -j REDSOCKS
+ iptables -t nat -A PREROUTING -p tcp -j REDSOCKS
+
+Enable the REDSOCKS nat chain rules by:
+
+.. code:: bash
+
+ cd ~/proxy_redsocks
+ ./intc-proxy.sh
+
+*Note*
+These REDSOCKS nat chain rules are not persistent and you need to execute the above-mentioned
+commands after every reboot.
+
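+One possible way to persist the rules on CentOS 7 (a sketch, assuming the iptables-services
+package is acceptable in your environment):
+
+.. code:: bash
+
+   yum install -y iptables-services
+   # save the currently loaded rules to /etc/sysconfig/iptables for reload at boot
+   service iptables save
+   systemctl enable iptables
+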
**Network Time Protocol (NTP) setup and configuration**
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -133,7 +282,8 @@ Install ntp by:
$ sudo apt-get update
$ sudo apt-get install -y ntp
-Insert the following two lines after “server ntp.ubuntu.com” line and before “ # Access control configuration; see `link`_ for” line in /etc/ntp.conf file:
+Insert the following two lines after the “server ntp.ubuntu.com” line and before the “# Access control
+configuration; see `link`_ for” line in the /etc/ntp.conf file:
.. _link: /usr/share/doc/ntp-doc/html/accopt.html
@@ -153,8 +303,9 @@ Scenario Testing
There are three ways of performing scenario testing,
- 1 Fuel
- - 2 OPNFV-Playground
- - 3 Jenkins Project
+ - 2 Apex
+ - 3 OPNFV-Playground
+ - 4 Jenkins Project
Fuel
~~~~
@@ -235,14 +386,16 @@ To include fuel plugins in the deployment configuration file, use the “stack-e
#module-config overrides
**Note:**
-The “module-config-name” and “module-config-version” should be same as the name of plugin configuration file.
+The “module-config-name” and “module-config-version” should be the same as the name of the plugin
+configuration file.
The “module-config-override” is used to configure the plugin by overriding the corresponding keys in
the plugin config yaml file present in ~/fuel/deploy/config/plugins/.
``(iv). “dea-override-config” Module``
-To configure the HA/No-HA mode, network segmentation types and role to node assignments, use the “dea-override-config” key.
+To configure the HA/No-HA mode, network segmentation types and role-to-node assignments, use the
+“dea-override-config” key.
.. code:: bash
@@ -271,16 +424,22 @@ To configure the HA/No-HA mode, network segmentation types and role to node assi
editable:
storage:
ephemeral_ceph:
- description: Configures Nova to store ephemeral volumes in RBD. This works best if Ceph is enabled for volumes and images, too. Enables live migration of all types of Ceph backed VMs (without this option, live migration will only work with VMs launched from Cinder volumes).
+ description: Configures Nova to store ephemeral volumes in RBD.
+ This works best if Ceph is enabled for volumes and images, too.
+ Enables live migration of all types of Ceph backed VMs (without this
+ option, live migration will only work with VMs launched from
+ Cinder volumes).
label: Ceph RBD for ephemeral volumes (Nova)
type: checkbox
value: true
weight: 75
images_ceph:
- description: Configures Glance to use the Ceph RBD backend to store images.If enabled, this option will prevent Swift from installing.
+ description: Configures Glance to use the Ceph RBD backend to store
+ images. If enabled, this option will prevent Swift from installing.
label: Ceph RBD for images (Glance)
restrictions:
- - settings:storage.images_vcenter.value == true: Only one Glance backend could be selected.
+ - settings:storage.images_vcenter.value == true: Only one Glance
+ backend could be selected.
type: checkbox
value: true
weight: 30
@@ -296,7 +455,8 @@ This is an optional key present at the ending of the scenario file.
``(vi). Mapping to short scenario name``
-The scenario.yaml file is used to map the short names of scenario's to the one or more deployment scenario configuration yaml files.
+The scenario.yaml file is used to map the short scenario names to one or more deployment
+scenario configuration yaml files.
The short scenario names should follow the scheme below:
.. code:: bash
@@ -306,7 +466,8 @@ The short scenario names should follow the scheme below:
[os]: mandatory
possible value: os
-Please note that this field is needed in order to select parent jobs to list and do blocking relations between them.
+Please note that this field is needed in order to select parent jobs to list and do blocking
+relations between them.
.. code:: bash
@@ -349,7 +510,8 @@ Command to deploy the os-nosdn-kvm_ovs_dpdk-ha scenario:
.. code:: bash
$ cd ~/fuel/ci/
- $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default -s ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
+ $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default \
+ -s ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
where,
``-b`` is used to specify the configuration directory
@@ -370,6 +532,203 @@ where,
Check $ sudo ./deploy.sh -h for further information.
+Apex
+~~~~
+
+The Apex installer uses CentOS as the platform.
+
+**1 Install Packages :**
+
+Install the necessary packages as follows:
+
+.. code:: bash
+
+ cd ~
+ yum install -y git rpm-build python-setuptools python-setuptools-devel
+ yum install -y epel-release gcc
+ curl -O https://bootstrap.pypa.io/get-pip.py
+ yum install -y python3 python34
+ /usr/bin/python3.4 get-pip.py
+ yum install -y python34-devel python34-setuptools
+ yum install -y libffi-devel python-devel openssl-devel
+ yum -y install libxslt-devel libxml2-devel
+
+Then you can use “dev_deploy_check.sh” in the Apex installer source to install the remaining
+necessary packages as follows:
+
+.. code:: bash
+
+ cd ~
+ git clone https://gerrit.opnfv.org/gerrit/p/apex.git
+ export CONFIG=$(pwd)/apex/build
+ export LIB=$(pwd)/apex/lib
+ export PYTHONPATH=$PYTHONPATH:$(pwd)/apex/lib/python
+ cd apex/ci
+ ./dev_deploy_check.sh
+ yum install -y python2-oslo-config python2-debtcollector
+
+
+**2 Create ssh key :**
+
+Use the following commands to create an ssh key; when asked for a passphrase, just press return for
+an empty passphrase:
+
+.. code:: bash
+
+ cd ~
+ ssh-keygen -t rsa
+
+Then prepare the authorized_keys for Apex scenario deployment:
+
+.. code:: bash
+
+ cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
+
+**3 Create default pool :**
+
+Use the following command to define the default pool device:
+
+.. code:: bash
+
+ cd ~
+ virsh pool-define /dev/stdin <<EOF
+ <pool type='dir'>
+ <name>default</name>
+ <target>
+ <path>/var/lib/libvirt/images</path>
+ </target>
+ </pool>
+ EOF
+
+Use the following commands to start the default pool device and set it to autostart:
+
+.. code:: bash
+
+ virsh pool-start default
+ virsh pool-autostart default
+
+Use the following commands to verify that the default pool device was created, started and set to
+autostart successfully:
+
+.. code:: bash
+
+ virsh pool-list
+ virsh pool-info default
+
+**4 Get Apex source code :**
+
+Get Apex installer source code:
+
+.. code:: bash
+
+ git clone https://gerrit.opnfv.org/gerrit/p/apex.git
+ cd apex
+
+**5 Modify code to work behind proxy :**
+
+In the “lib” sub-directory of the Apex source, change line 284 “if ping -c 2 www.google.com > /dev/null;
+then” to “if curl www.google.com > /dev/null; then” in the “common-functions.sh” file, since we can’t
+ping www.google.com from behind the Intel proxy.
+
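+The same edit can be made non-interactively. A hedged sed sketch, assuming line 284 still matches
+this pattern in your checkout:
+
+.. code:: bash
+
+   sed -i 's|if ping -c 2 www.google.com > /dev/null; then|if curl www.google.com > /dev/null; then|' \
+       ~/apex/lib/common-functions.sh
+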
+**6 Setup build environment :**
+
+Set up the build environment by:
+
+.. code:: bash
+
+ cd ~
+ export BASE=$(pwd)/apex/build
+ export LIB=$(pwd)/apex/lib
+ export PYTHONPATH=$PYTHONPATH:$(pwd)/apex/lib/python
+ export IMAGES=$(pwd)/apex/.build
+
+**7 Build Apex installer :**
+
+Build undercloud image by:
+
+.. code:: bash
+
+ cd ~/apex/build
+ make images-clean
+ make undercloud
+
+You can look at the targets in ~/apex/build/Makefile to build an image for a specific feature.
+The following shows how to build a vanilla ODL image (this can be used to build the overcloud image
+for the basic (nosdn-nofeature) and opendaylight test scenarios):
+
+.. code:: bash
+
+ cd ~/apex/build
+ make overcloud-opendaylight
+
+You can build the complete full set of images (undercloud, overcloud-full, overcloud-opendaylight,
+overcloud-onos) by:
+
+.. code:: bash
+
+ cd ~/apex/build
+ make images
+
+**8 Modification of network_settings.yaml :**
+
+Since we are working behind a proxy, we need to modify the network_settings.yaml in ~/apex/config/network
+to make the deployment work properly. In order to avoid checking our modification into the repo
+accidentally, it is recommended that you copy “network_settings.yaml” to “intc_network_settings.yaml”
+in ~/apex/config/network, as sketched below, and make the following modification in intc_network_settings.yaml:
+
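+A sketch of the copy step (paths as described above):
+
+.. code:: bash
+
+   cd ~/apex/config/network
+   cp network_settings.yaml intc_network_settings.yaml
+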
+Change the dns_servers setting from
+
+.. code:: bash
+
+ dns_servers: ["8.8.8.8", "8.8.4.4"]
+
+to
+
+.. code:: bash
+
+ dns_servers: ["<ip-address>"]
+
+Also, you need to modify deploy.sh in apex/ci from “ntp_server="pool.ntp.org"” to
+“ntp_server="<ip-address>"” to reflect the fact that we can’t reach an outside NTP server and should
+just use local time.
+
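+A possible one-liner for this edit, keeping <ip-address> as the placeholder for your local NTP
+server:
+
+.. code:: bash
+
+   sed -i 's/ntp_server="pool.ntp.org"/ntp_server="<ip-address>"/' ~/apex/ci/deploy.sh
+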
+**9 Commands to deploy scenario :**
+
+The following shows the commands used to deploy the os-nosdn-kvm_ovs_dpdk-noha scenario behind the proxy:
+
+.. code:: bash
+
+ cd ~/apex/ci
+ ./clean.sh
+ ./dev_deploy_check.sh
+ ./deploy.sh -v --ping-site <ping_ip-address> --dnslookup-site <dns_ip-address> -n \
+ ~/apex/config/network/intc_network_settings.yaml -d \
+ ~/apex/config/deploy/os-nosdn-kvm_ovs_dpdk-noha.yaml
+
+**10 Accessing the Overcloud dashboard :**
+
+If the deployment completes successfully, the last few output lines from the deployment will look
+like the following:
+
+.. code:: bash
+
+ INFO: Undercloud VM has been setup to NAT Overcloud public network
+ Undercloud IP: <ip-address>, please connect by doing 'opnfv-util undercloud'
+ Overcloud dashboard available at http://<ip-address>/dashboard
+ INFO: Post Install Configuration Complete
+
+**11 Accessing the Undercloud and Overcloud through command line :**
+
+At the end of the deployment we obtain the Undercloud IP. One can log in to the Undercloud and obtain
+the Overcloud IP as follows:
+
+.. code:: bash
+
+ cd ~/apex/ci/
+ ./util.sh undercloud
+ source stackrc
+ nova list
+ ssh heat-admin@<overcloud-ip>
+
OPNFV-Playground
~~~~~~~~~~~~~~~~
@@ -382,8 +741,10 @@ Install OPNFV-playground (the tool chain to deploy/test CI scenarios in fuel@opn
$ git clone https://github.com/jonasbjurel/OPNFV-Playground.git
$ cd OPNFV-Playground/ci_fuel_opnfv/
-- Follow the README.rst in this ~/OPNFV-Playground/ci_fuel_opnfv sub-holder to complete all necessary installation and setup.
-- Section “RUNNING THE PIPELINE” in README.rst explain how to use this ci_pipeline to deploy/test CI test scenarios, you can also use
+- Follow the README.rst in the ~/OPNFV-Playground/ci_fuel_opnfv sub-folder to complete all necessary
+  installation and setup.
+- Section “RUNNING THE PIPELINE” in README.rst explains how to use this ci_pipeline to deploy/test CI
+  test scenarios; you can also use
.. code:: bash
@@ -393,14 +754,16 @@ Install OPNFV-playground (the tool chain to deploy/test CI scenarios in fuel@opn
``1 Downgrade paramiko package from 2.x.x to 1.10.0``
-The paramiko package 2.x.x doesn’t work with OPNFV-playground tool chain now, Jira ticket FUEL - 188 has been raised for the same.
+The paramiko package 2.x.x doesn’t work with the OPNFV-Playground tool chain at present; Jira ticket
+FUEL-188 has been raised for the same.
Check paramiko package version by following below steps in your system:
.. code:: bash
$ python
- Python 2.7.6 (default, Jun 22 2015, 17:58:13) [GCC 4.8.2] on linux2 Type "help", "copyright", "credits" or "license" for more information.
+ Python 2.7.6 (default, Jun 22 2015, 17:58:13) [GCC 4.8.2] on linux2 Type "help", "copyright",
+ "credits" or "license" for more information.
>>> import paramiko
>>> print paramiko.__version__
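
If the version printed is 2.x.x, one way to perform the downgrade (a sketch; the guide's own
downgrade steps may differ) is:

.. code:: bash

   $ sudo pip install paramiko==1.10.0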
@@ -448,7 +811,8 @@ Implement the scenario file as described in 3.1.4
``4 Deploying the scenario``
-You can use the following command to deploy/test os-nosdn kvm_ovs_dpdk-(no)ha and os-nosdn-kvm_ovs_dpdk_bar-(no)ha scenario
+You can use the following command to deploy/test the os-nosdn-kvm_ovs_dpdk-(no)ha and
+os-nosdn-kvm_ovs_dpdk_bar-(no)ha scenarios
.. code:: bash
@@ -482,12 +846,15 @@ Note:
Jenkins Project
~~~~~~~~~~~~~~~
-os-nosdn-kvm_ovs_dpdk-(no)ha and os-nosdn-kvm_ovs_dpdk_bar-(no)ha scenario can be executed from the jenkins project :
+The os-nosdn-kvm_ovs_dpdk-(no)ha and os-nosdn-kvm_ovs_dpdk_bar-(no)ha scenarios can be executed from
+the following Jenkins projects:
``HA scenarios:``
1. "fuel-os-nosdn-kvm_ovs_dpdk-ha-baremetal-daily-master" (os-nosdn-kvm_ovs_dpdk-ha)
2. "fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-baremetal-daily-master" (os-nosdn-kvm_ovs_dpdk_bar-ha)
+ 3. "apex-os-nosdn-kvm_ovs_dpdk-ha-baremetal-master" (os-nosdn-kvm_ovs_dpdk-ha)
``NOHA scenarios:``
- 1. "fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-master" (os-nosdn-kvm_ovs_dpdk-noha)
- 2. "fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-master" (os-nosdn-kvm_ovs_dpdk_bar-noha)
+ 1. "fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-master" (os-nosdn-kvm_ovs_dpdk-noha)
+ 2. "fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-master" (os-nosdn-kvm_ovs_dpdk_bar-noha)
+ 3. "apex-os-nosdn-kvm_ovs_dpdk-noha-baremetal-master" (os-nosdn-kvm_ovs_dpdk-noha)
diff --git a/docs/release/scenarios/os-nosdn-kvm-ha/os-nosdn-kvm-ha.description.rst b/docs/release/scenarios/os-nosdn-kvm-ha/os-nosdn-kvm-ha.description.rst
index f64f26ffc..a02033d58 100644
--- a/docs/release/scenarios/os-nosdn-kvm-ha/os-nosdn-kvm-ha.description.rst
+++ b/docs/release/scenarios/os-nosdn-kvm-ha/os-nosdn-kvm-ha.description.rst
@@ -98,7 +98,8 @@ Scenario Usage Overview
.. code:: bash
- sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr -log ~/Deployment-888.log.tar.gz
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha \
+ ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr -log ~/Deployment-888.log.tar.gz
* Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual
Environment:
@@ -107,7 +108,8 @@ Scenario Usage Overview
.. code:: bash
- sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images -log ~/Deployment-888.log.tar.gz
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha \
+ ~/CONF/virtual/dha.yaml -s /mnt/images -log ~/Deployment-888.log.tar.gz
* os-nosdn-kvm-ha scenario can be executed from the jenkins project
"fuel-os-nosdn-kvm-ha-baremetal-daily-master"
@@ -129,5 +131,5 @@ Known Limitations, Issues and Workarounds
References
----------
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-ha/index.rst b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-ha/index.rst
index ddb6071c8..70a7fbf11 100755
--- a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-ha/index.rst
+++ b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-ha/index.rst
@@ -1,14 +1,14 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. _kvmfornfv-os-nosdn-kvm_nfv_ovs_dpdk-ha:
+.. _kvmfornfv-os-nosdn-kvm_ovs_dpdk-ha:
*****************************************************
-os-nosdn-kvm_nfv_ovs_dpdk-ha Overview and Description
+os-nosdn-kvm_ovs_dpdk-ha Overview and Description
*****************************************************
.. toctree::
:numbered:
:maxdepth: 3
- ./os-nosdn-kvm_nfv_ovs_dpdk-ha.description.rst
+ ./os-nosdn-kvm_ovs_dpdk-ha.description.rst
diff --git a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-ha/os-nosdn-kvm_nfv_ovs_dpdk-ha.description.rst b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-ha/os-nosdn-kvm_ovs_dpdk-ha.description.rst
index a96130cad..9fac19247 100644
--- a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-ha/os-nosdn-kvm_nfv_ovs_dpdk-ha.description.rst
+++ b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-ha/os-nosdn-kvm_ovs_dpdk-ha.description.rst
@@ -3,7 +3,7 @@
.. http://creativecommons.org/licenses/by/4.0
=========================================
-os-nosdn-kvm_nfv_ovs_dpdk-ha Description
+os-nosdn-kvm_ovs_dpdk-ha Description
=========================================
Introduction
@@ -20,7 +20,11 @@ QEMU patches for achieving low latency. High Availability feature is achieved
by deploying OpenStack multi-node setup with 3 controllers and 2 computes nodes.
KVM4NFV packages will be installed on compute nodes as part of deployment.
-This scenario testcase deployment is happening on multi-node by using OPNFV Fuel deployer.
+This scenario testcase is deployed on a multi-node setup by using the OPNFV Fuel
+and Apex deployers.
+
+
+**Using Fuel Installer**
Scenario Components and Composition
-----------------------------------
@@ -135,13 +139,17 @@ argument to deploy.py script
editable:
storage:
ephemeral_ceph:
- description: Configures Nova to store ephemeral volumes in RBD. This works best if Ceph is enabled for volumes and images, too. Enables live migration of all types of Ceph backed VMs (without this option, live migration will only work with VMs launched from Cinder volumes).
+ description: Configures Nova to store ephemeral volumes in RBD. This works best if Ceph
+ is enabled for volumes and images, too. Enables live migration of all types of Ceph
+ backed VMs (without this option, live migration will only work with VMs launched from
+ Cinder volumes).
label: Ceph RBD for ephemeral volumes (Nova)
type: checkbox
value: true
weight: 75
images_ceph:
- description: Configures Glance to use the Ceph RBD backend to store images. If enabled, this option will prevent Swift from installing.
+ description: Configures Glance to use the Ceph RBD backend to store images. If enabled,
+ this option will prevent Swift from installing.
label: Ceph RBD for images (Glance)
restrictions:
- settings:storage.images_vcenter.value == true: Only one Glance backend could be selected.
@@ -190,7 +198,8 @@ argument to deploy.py script
* In os-nosdn-kvm_ovs_dpdk-ha scenario, OVS is installed on the compute nodes with DPDK configured
-* Hugepages for DPDK are configured in the attributes_1 section of the no-ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml
+* Hugepages for DPDK are configured in the attributes_1 section of the
+  ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml
* Hugepages are only configured for compute nodes
@@ -199,7 +208,7 @@ argument to deploy.py script
Scenario Usage Overview
-----------------------
.. Provide a brief overview on how to use the scenario and the features available to the
-.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
+.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
.. where the specifics of the features are covered including examples and API's
* The high availability feature can be achieved by executing deploy.py with
@@ -213,7 +222,8 @@ Command to deploy the os-nosdn-kvm_ovs_dpdk-ha scenario:
.. code:: bash
$ cd ~/fuel/ci/
- $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default -s ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
+ $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default \
+ -s ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
where,
-b is used to specify the configuration directory
@@ -234,14 +244,110 @@ where,
* The test scenario is passed if deployment is successful and all 5 nodes are
  accessible (IP assigned, up and running).
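+
+A trivial way to sanity-check that all nodes are reachable (the IP addresses below are purely
+illustrative assumptions; substitute the addresses reported by your deployment):
+
+.. code:: bash
+
+   # ping each deployed node once
+   for ip in 10.20.0.3 10.20.0.4 10.20.0.5 10.20.0.6 10.20.0.7; do
+       ping -c 1 $ip && echo "$ip is up"
+   done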
-Known Limitations, Issues and Workarounds
------------------------------------------
-.. Explain any known limitations here.
-* Test scenario os-nosdn-kvm_ovs_dpdk-ha result is not stable.
+**Using Apex Installer**
+
+Scenario Components and Composition
+-----------------------------------
+.. In this section describe the unique components that make up the scenario,
+.. what each component provides and why it has been included in order
+.. to communicate to the user the capabilities available in this scenario.
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone and Horizon. The optional
+Tacker and Congress services are also enabled by default. Ceph is used as
+the backend storage to Cinder on all deployed nodes.
+
+All services are in HA, meaning that there are multiple cloned instances of
+each service, and they are balanced by HA Proxy using a Virtual IP Address
+per service.
+
+The os-nosdn-kvm_ovs_dpdk-ha.yaml file contains the following configurations and
+is passed as an argument to the deploy.sh script.
+
+* ``global-params:`` Used to define global parameters; currently only one
+  such parameter exists, i.e., ha_enabled
+
+.. code:: bash
+
+ global-params:
+ ha_enabled: true
+
+* ``deploy_options:`` Used to define the type of SDN controller, to configure
+  tacker, congress and service function chaining (SFC) support for ODL and ONOS,
+  to configure ODL with SDNVPN support, which dataplane to use for overcloud
+  tenant networks, whether to run the kvm real time kernel (rt_kvm) in the
+  compute node(s) to reduce the network latencies caused by network function
+  virtualization, and whether to install and configure fdio functionality in the
+  overcloud
+
+.. code:: bash
+
+ deploy_options:
+ sdn_controller: false
+ tacker: true
+ congress: true
+ sfc: false
+ vpn: false
+ rt_kvm: true
+ dataplane: ovs_dpdk
+
+* ``performance:`` Used to set performance options on specific roles. The valid
+ roles are 'Compute', 'Controller' and 'Storage', and the valid sections are
+ 'kernel' and 'nova'
+
+.. code:: bash
+
+ performance:
+ Controller:
+ kernel:
+ hugepages: 1024
+ hugepagesz: 2M
+ Compute:
+ kernel:
+ hugepagesz: 2M
+ hugepages: 2048
+ intel_iommu: 'on'
+ iommu: pt
+ ovs:
+ socket_memory: 1024
+ pmd_cores: 2
+ dpdk_cores: 1
+
+Scenario Usage Overview
+-----------------------
+.. Provide a brief overview on how to use the scenario and the features available to the
+.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
+.. where the specifics of the features are covered including examples and API's
+
+* The high availability feature can be achieved by executing deploy.sh with
+  os-nosdn-kvm_ovs_dpdk-ha.yaml as an argument.
+
+* Build the undercloud and overcloud images as mentioned below:
+
+.. code:: bash
+
+ cd ~/apex/build/
+ make images-clean
+ make images
+
+* Command to deploy os-nosdn-kvm_ovs_dpdk-ha scenario:
+
+.. code:: bash
+
+ cd ~/apex/ci/
+ ./clean.sh
+ ./dev_dep_check.sh
+ ./deploy.sh -v --ping-site <ping_ip-address> --dnslookup-site <dns_ip-address> -n \
+ ~/apex/config/network/intc_network_settings.yaml -d ~/apex/config/deploy/os-nosdn-kvm_ovs_dpdk-ha.yaml
+
+where,
+ -v is used for virtual deployment
+ -n is used for providing the network configuration file
+ -d is used for providing the scenario configuration file
References
----------
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/Danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-noha/index.rst b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-noha/index.rst
index 742ddb1ee..e5901cc62 100755
--- a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-noha/index.rst
+++ b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-noha/index.rst
@@ -1,14 +1,14 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. _kvmfornfv-os-nosdn-kvm_nfv_ovs_dpdk-noha:
+.. _kvmfornfv-os-nosdn-kvm_ovs_dpdk-noha:
*******************************************************
-os-nosdn-kvm_nfv_ovs_dpdk-noha Overview and Description
+os-nosdn-kvm_ovs_dpdk-noha Overview and Description
*******************************************************
.. toctree::
:numbered:
:maxdepth: 3
- ./os-nosdn-kvm_nfv_ovs_dpdk-noha.description.rst
+ ./os-nosdn-kvm_ovs_dpdk-noha.description.rst
diff --git a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-noha/os-nosdn-kvm_nfv_ovs_dpdk-noha.description.rst b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-noha/os-nosdn-kvm_ovs_dpdk-noha.description.rst
index a7778d963..6bd5d4adc 100644
--- a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk-noha/os-nosdn-kvm_nfv_ovs_dpdk-noha.description.rst
+++ b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk-noha/os-nosdn-kvm_ovs_dpdk-noha.description.rst
@@ -3,7 +3,7 @@
.. http://creativecommons.org/licenses/by/4.0
==========================================
-os-nosdn-kvm_nfv_ovs_dpdk-noha Description
+os-nosdn-kvm_ovs_dpdk-noha Description
==========================================
Introduction
@@ -16,11 +16,17 @@ The purpose of os-nosdn-kvm_ovs_dpdk-noha scenario testing is to test the No
High Availability deployment and configuration of OPNFV software suite
with OpenStack and without SDN software. This OPNFV software suite
includes OPNFV KVM4NFV latest software packages for Linux Kernel and
-QEMU patches for achieving low latency. No High Availability feature is achieved
-by deploying OpenStack multi-node setup with 1 controller and 3 computes nodes.
+QEMU patches for achieving low latency. When deployed using Fuel, the No High
+Availability setup is a multi-node OpenStack deployment with 1 controller and
+3 compute nodes; when deployed using Apex, the setup has 1 controller and
+1 compute node.
KVM4NFV packages will be installed on compute nodes as part of deployment.
-This scenario testcase deployment is happening on multi-node by using OPNFV Fuel deployer.
+This scenario testcase is deployed on a multi-node setup by using the OPNFV Fuel
+and Apex deployers.
+
+
+**Using Fuel Installer**
Scenario Components and Composition
------------------------------------
@@ -151,13 +157,17 @@ argument to deploy.py script
editable:
storage:
ephemeral_ceph:
- description: Configures Nova to store ephemeral volumes in RBD. This works best if Ceph is enabled for volumes and images, too. Enables live migration of all types of Ceph backed VMs (without this option, live migration will only work with VMs launched from Cinder volumes).
+ description: Configures Nova to store ephemeral volumes in RBD. This works best if Ceph
+ is enabled for volumes and images, too. Enables live migration of all types of Ceph
+ backed VMs (without this option, live migration will only work with VMs launched from
+ Cinder volumes).
label: Ceph RBD for ephemeral volumes (Nova)
type: checkbox
value: true
weight: 75
images_ceph:
- description: Configures Glance to use the Ceph RBD backend to store images. If enabled, this option will prevent Swift from installing.
+ description: Configures Glance to use the Ceph RBD backend to store images. If enabled,
+ this option will prevent Swift from installing.
label: Ceph RBD for images (Glance)
restrictions:
- settings:storage.images_vcenter.value == true: Only one Glance backend could be selected.
@@ -180,7 +190,8 @@ argument to deploy.py script
* In os-nosdn-kvm_ovs_dpdk-noha scenario, OVS is installed on the compute nodes with DPDK configured
-* Hugepages for DPDK are configured in the attributes_1 section of the no-ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml
+* Hugepages for DPDK are configured in the attributes_1 section of the
+  no-ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml
* Hugepages are only configured for compute nodes
@@ -205,7 +216,8 @@ Command to deploy the os-nosdn-kvm_ovs_dpdk-noha scenario:
.. code:: bash
$ cd ~/fuel/ci/
- $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default -s no-ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
+ $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default \
+ -s no-ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
where,
-b is used to specify the configuration directory
@@ -226,14 +238,107 @@ where,
* The test scenario is passed if deployment is successful and all 4 nodes are
  accessible (IP assigned, up and running).
-Known Limitations, Issues and Workarounds
------------------------------------------
-.. Explain any known limitations here.
-* Test scenario os-nosdn-kvm_ovs_dpdk-noha result is not stable.
+**Using Apex Installer**
+
+Scenario Components and Composition
+-----------------------------------
+.. In this section describe the unique components that make up the scenario,
+.. what each component provides and why it has been included in order
+.. to communicate to the user the capabilities available in this scenario.
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone and Horizon. The optional
+Tacker and Congress services are also enabled by default. Ceph is used as
+the backend storage to Cinder on all deployed nodes.
+
+The os-nosdn-kvm_ovs_dpdk-noha.yaml file contains the following configurations and
+is passed as an argument to the deploy.sh script.
+
+* ``global-params:`` Used to define global parameters; currently only one
+  such parameter exists, i.e., ha_enabled
+
+.. code:: bash
+
+ global-params:
+ ha_enabled: false
+
+* ``deploy_options:`` Used to define the type of SDN controller, to configure
+  tacker, congress and service function chaining (SFC) support for ODL and ONOS,
+  to configure ODL with SDNVPN support, which dataplane to use for overcloud
+  tenant networks, whether to run the kvm real time kernel (rt_kvm) in the
+  compute node(s) to reduce the network latencies caused by network function
+  virtualization, and whether to install and configure fdio functionality in the
+  overcloud
+
+.. code:: bash
+
+ deploy_options:
+ sdn_controller: false
+ tacker: true
+ congress: true
+ sfc: false
+ vpn: false
+ rt_kvm: true
+ dataplane: ovs_dpdk
+
+* ``performance:`` Used to set performance options on specific roles. The valid
+ roles are 'Compute', 'Controller' and 'Storage', and the valid sections are
+ 'kernel' and 'nova'
+
+.. code:: bash
+
+ performance:
+ Controller:
+ kernel:
+ hugepages: 1024
+ hugepagesz: 2M
+ Compute:
+ kernel:
+ hugepagesz: 2M
+ hugepages: 2048
+ intel_iommu: 'on'
+ iommu: pt
+ ovs:
+ socket_memory: 1024
+ pmd_cores: 2
+ dpdk_cores: 1
+
+Scenario Usage Overview
+-----------------------
+.. Provide a brief overview on how to use the scenario and the features available to the
+.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
+.. where the specifics of the features are covered including examples and API's
+
+* The no high availability feature can be achieved by executing deploy.sh with
+  os-nosdn-kvm_ovs_dpdk-noha.yaml as an argument.
+
+* Build the undercloud and overcloud images as mentioned below:
+
+.. code:: bash
+
+ cd ~/apex/build/
+ make images-clean
+ make images
+
+* Command to deploy os-nosdn-kvm_ovs_dpdk-noha scenario:
+
+.. code:: bash
+
+ cd ~/apex/ci/
+ ./clean.sh
+ ./dev_dep_check.sh
+ ./deploy.sh -v --ping-site <ping_ip-address> --dnslookup-site <dns_ip-address> -n \
+ ~/apex/config/network/intc_network_settings.yaml -d ~/apex/config/deploy/os-nosdn-kvm_ovs_dpdk-noha.yaml
+
+where,
+ -v is used for virtual deployment
+ -n is used for providing the network configuration file
+ -d is used for providing the scenario configuration file
+
References
----------
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/Danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha/index.rst b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/index.rst
index a8192edcc..73e7927fd 100755
--- a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha/index.rst
+++ b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/index.rst
@@ -1,14 +1,14 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. _kvmfornfv-os-nosdn-kvm_nfv_ovs_dpdk_bar-ha:
+.. _kvmfornfv-os-nosdn-kvm_ovs_dpdk_bar-ha:
*********************************************************
-os-nosdn-kvm_nfv_ovs_dpdk_bar-ha Overview and Description
+os-nosdn-kvm_ovs_dpdk_bar-ha Overview and Description
*********************************************************
.. toctree::
:numbered:
:maxdepth: 3
- ./os-nosdn-kvm_nfv_ovs_dpdk_bar-ha.description.rst
+ ./os-nosdn-kvm_ovs_dpdk_bar-ha.description.rst
diff --git a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha.description.rst b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/os-nosdn-kvm_ovs_dpdk_bar-ha.description.rst
index 0ab20514a..ee182297a 100644
--- a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha/os-nosdn-kvm_nfv_ovs_dpdk_bar-ha.description.rst
+++ b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/os-nosdn-kvm_ovs_dpdk_bar-ha.description.rst
@@ -3,7 +3,7 @@
.. http://creativecommons.org/licenses/by/4.0
============================================
-os-nosdn-kvm_nfv_ovs_dpdk_bar-ha Description
+os-nosdn-kvm_ovs_dpdk_bar-ha Description
============================================
Introduction
@@ -142,13 +142,17 @@ argument to deploy.py script
editable:
storage:
ephemeral_ceph:
- description: Configures Nova to store ephemeral volumes in RBD. This works best if Ceph is enabled for volumes and images, too. Enables live migration of all types of Ceph backed VMs (without this option, live migration will only work with VMs launched from Cinder volumes).
+ description: Configures Nova to store ephemeral volumes in RBD. This works best if Ceph
+ is enabled for volumes and images, too. Enables live migration of all types of Ceph
+ backed VMs (without this option, live migration will only work with VMs launched from
+ Cinder volumes).
label: Ceph RBD for ephemeral volumes (Nova)
type: checkbox
value: true
weight: 75
images_ceph:
- description: Configures Glance to use the Ceph RBD backend to store images. If enabled, this option will prevent Swift from installing.
+ description: Configures Glance to use the Ceph RBD backend to store images. If enabled,
+ this option will prevent Swift from installing.
label: Ceph RBD for images (Glance)
restrictions:
- settings:storage.images_vcenter.value == true: Only one Glance backend could be selected.
@@ -199,7 +203,8 @@ argument to deploy.py script
* Barometer plugin is also implemented along with the KVM plugin
-* Hugepages for DPDK are configured in the attributes_1 section of the no-ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml
+* Hugepages for DPDK are configured in the attributes_1 section of the
+  ha_nfv-kvm_nfv-ovs-dpdk-bar_heat_ceilometer_scenario.yaml
* Hugepages are only configured for compute nodes
@@ -223,7 +228,8 @@ Command to deploy the os-nosdn-kvm_ovs_dpdk_bar-ha scenario:
.. code:: bash
$ cd ~/fuel/ci/
- $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default -s ha_nfv-kvm_nfv-ovs-dpdk-bar_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
+ $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default \
+ -s ha_nfv-kvm_nfv-ovs-dpdk-bar_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
where,
-b is used to specify the configuration directory
@@ -253,5 +259,5 @@ Known Limitations, Issues and Workarounds
References
----------
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/Danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha/index.rst b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-noha/index.rst
index 3a07e98c9..e1557b0fe 100755
--- a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha/index.rst
+++ b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-noha/index.rst
@@ -1,14 +1,14 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. _kvmfornfv-os-nosdn-kvm_nfv_ovs_dpdk_bar-noha:
+.. _kvmfornfv-os-nosdn-kvm_ovs_dpdk_bar-noha:
***********************************************************
-os-nosdn-kvm_nfv_ovs_dpdk_bar-noha Overview and Description
+os-nosdn-kvm_ovs_dpdk_bar-noha Overview and Description
***********************************************************
.. toctree::
:numbered:
:maxdepth: 3
- ./os-nosdn-kvm_nfv_ovs_dpdk_bar-noha.description.rst
+ ./os-nosdn-kvm_ovs_dpdk_bar-noha.description.rst
diff --git a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha.description.rst b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-noha/os-nosdn-kvm_ovs_dpdk_bar-noha.description.rst
index 47a7f1034..48f2852fe 100644
--- a/docs/release/scenarios/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha/os-nosdn-kvm_nfv_ovs_dpdk_bar-noha.description.rst
+++ b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-noha/os-nosdn-kvm_ovs_dpdk_bar-noha.description.rst
@@ -3,7 +3,7 @@
.. http://creativecommons.org/licenses/by/4.0
============================================
-os-nosdn-kvm_nfv_ovs_dpdk_bar-ha Description
+os-nosdn-kvm_ovs_dpdk_bar-noha Description
============================================
Introduction
@@ -51,7 +51,8 @@ argument to deploy.py script
* ``stack-extensions:`` Stack extensions are OPNFV added-value features in the form
  of a fuel-plugin. Plugins listed in stack extensions are enabled and
- configured. os-nosdn-kvm_ovs_dpdk_bar-noha scenario currently uses KVM-1.0.0 plugin and barometer-1.0.0 plugin.
+ configured. os-nosdn-kvm_ovs_dpdk_bar-noha scenario currently uses KVM-1.0.0 plugin and
+ barometer-1.0.0 plugin.
.. code:: bash
@@ -157,13 +158,17 @@ argument to deploy.py script
editable:
storage:
ephemeral_ceph:
- description: Configures Nova to store ephemeral volumes in RBD. This works best if Ceph is enabled for volumes and images, too. Enables live migration of all types of Ceph backed VMs (without this option, live migration will only work with VMs launched from Cinder volumes).
+ description: Configures Nova to store ephemeral volumes in RBD. This works best if Ceph
+ is enabled for volumes and images, too. Enables live migration of all types of Ceph
+ backed VMs (without this option, live migration will only work with VMs launched from
+ Cinder volumes).
label: Ceph RBD for ephemeral volumes (Nova)
type: checkbox
value: true
weight: 75
images_ceph:
- description: Configures Glance to use the Ceph RBD backend to store images. If enabled, this option will prevent Swift from installing.
+ description: Configures Glance to use the Ceph RBD backend to store images. If enabled,
+ this option will prevent Swift from installing.
label: Ceph RBD for images (Glance)
restrictions:
- settings:storage.images_vcenter.value == true: Only one Glance backend could be selected.
@@ -174,7 +179,8 @@ argument to deploy.py script
* ``dha-override-config:`` Provides information about the VM definition and
  Network config for virtual deployment. These configurations override
  the pod dha definition and point to the controller, compute and
- fuel definition files. The noha_nfv-kvm_nfv-ovs-dpdk-bar_heat_ceilometer_scenario.yaml has no dha-config changes i.e., default configuration is used.
+  fuel definition files. The noha_nfv-kvm_nfv-ovs-dpdk-bar_heat_ceilometer_scenario.yaml has no
+  dha-config changes, i.e., the default configuration is used.
* os-nosdn-kvm_ovs_dpdk_bar-noha scenario is successful when all the 4 Nodes are accessible,
up and running.
@@ -187,7 +193,8 @@ argument to deploy.py script
* Barometer plugin is also implemented along with the KVM plugin.
-* Hugepages for DPDK are configured in the attributes_1 section of the no-ha_nfv-kvm_nfv-ovs-dpdk_heat_ceilometer_scenario.yaml
+* Hugepages for DPDK are configured in the attributes_1 section of the
+  no-ha_nfv-kvm_nfv-ovs-dpdk-bar_heat_ceilometer_scenario.yaml
* Hugepages are only configured for compute nodes
@@ -210,7 +217,8 @@ Command to deploy the os-nosdn-kvm_ovs_dpdk_bar-noha scenario:
.. code:: bash
$ cd ~/fuel/ci/
- $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default -s no-ha_nfv-kvm_nfv-ovs-dpdk-bar_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
+ $ sudo ./deploy.sh -f -b file:///tmp/opnfv-fuel/deploy/config -l devel-pipeline -p default \
+ -s no-ha_nfv-kvm_nfv-ovs-dpdk-bar_heat_ceilometer_scenario.yaml -i file:///tmp/opnfv.iso
where,
-b is used to specify the configuration directory
@@ -240,5 +248,5 @@ Known Limitations, Issues and Workarounds
References
----------
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/Danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/userguide/Ftrace.debugging.tool.userguide.rst b/docs/release/userguide/Ftrace.debugging.tool.userguide.rst
index 95b7f8fe5..bd4d76d73 100644
--- a/docs/release/userguide/Ftrace.debugging.tool.userguide.rst
+++ b/docs/release/userguide/Ftrace.debugging.tool.userguide.rst
@@ -35,6 +35,10 @@ Version Features
| Danube | 4.4-linux-kernel level issues |
| | - Option to disable if not required |
+-----------------------------+-----------------------------------------------+
+| | - Breaktrace option is implemented. |
+| Euphrates | - Implemented post-execute script option to |
+| | disable the ftrace when it is enabled. |
++-----------------------------+-----------------------------------------------+
Implementation of Ftrace
@@ -56,7 +60,8 @@ Or you can mount it at run time with:
mount -t debugfs nodev /sys/kernel/debug
-Some configurations for Ftrace are used for other purposes, like finding latency or analyzing the system. For the purpose of debugging, the kernel configuration parameters that should be enabled are:
+Some configurations for Ftrace are used for other purposes, like finding latency or analyzing the
+system. For the purpose of debugging, the kernel configuration parameters that should be enabled are:
.. code:: bash
@@ -65,7 +70,8 @@ Some configurations for Ftrace are used for other purposes, like finding latency
CONFIG_STACK_TRACER=y
CONFIG_DYNAMIC_FTRACE=y
-The above parameters must be enabled in /boot/config-4.4.0-el7.x86_64 i.e., kernel config file for ftrace to work. If not enabled, change the parameter to ``y`` and run.,
+The above parameters must be enabled in /boot/config-4.4.0-el7.x86_64, i.e., the kernel config file,
+for ftrace to work. If not enabled, change the parameter to ``y`` and run:
.. code:: bash
@@ -85,11 +91,13 @@ The below is a list of few major files in Ftrace.
``available_tracers:``
- This holds the different types of tracers that have been compiled into the kernel. The tracers listed here can be configured by echoing their name into current_tracer.
+ This holds the different types of tracers that have been compiled into the kernel.
+ The tracers listed here can be configured by echoing their name into current_tracer.
``tracing_on:``
- This sets or displays whether writing to the tracering buffer is enabled. Echo 0 into this file to disable the tracer or 1 to enable it.
+ This sets or displays whether writing to the trace ring buffer is enabled. Echo 0 into this
+ file to disable the tracer or 1 to enable it.
``trace:``
@@ -97,11 +105,13 @@ The below is a list of few major files in Ftrace.
``tracing_cpumask:``
- This is a mask that lets the user only trace on specified CPUs. The format is a hex string representing the CPUs.
+ This is a mask that lets the user only trace on specified CPUs. The format is a hex string
+ representing the CPUs.
``events:``
- It holds event tracepoints (also known as static tracepoints) that have been compiled into the kernel. It shows what event tracepoints exist and how they are grouped by system.
+ It holds event tracepoints (also known as static tracepoints) that have been compiled into
+ the kernel. It shows what event tracepoints exist and how they are grouped by system.
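+
+For instance, standard ftrace usage (not specific to this guide) to restrict tracing to CPUs 0-1
+and enable a single static tracepoint would be:
+
+.. code:: bash
+
+   cd /sys/kernel/debug/tracing
+   echo 3 > tracing_cpumask                   # hex mask 0x3 selects CPUs 0 and 1
+   echo 1 > events/sched/sched_switch/enable  # enable one event tracepoint
+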
Available Tracers
@@ -125,11 +135,13 @@ Brief about a few:
``function_graph:``
- Similar to the function tracer except that the function tracer probes the functions on their entry whereas the function graph tracer traces on both entry and exit of the functions.
+ Similar to the function tracer except that the function tracer probes the functions on their
+ entry whereas the function graph tracer traces on both entry and exit of the functions.
``nop:``
- This is the "trace nothing" tracer. To remove tracers from tracing simply echo "nop" into current_tracer.
+ This is the "trace nothing" tracer. To remove tracers from tracing simply echo "nop" into
+ current_tracer.
Examples:
@@ -221,7 +233,8 @@ The set_event file contains all the enabled events list
sudo bash -c "echo function > $TRACEDIR/current_tracer
-- When tracing is turned ON by setting ``tracing_on=1``, the ``trace`` file keeps getting append with the traced data until ``tracing_on=0`` and then ftrace_buffer gets cleared.
+- When tracing is turned ON by setting ``tracing_on=1``, the ``trace`` file keeps getting appended
+  with the traced data until ``tracing_on=0``, and then the ftrace buffer gets cleared.
.. code:: bash
@@ -231,7 +244,42 @@ The set_event file contains all the enabled events list
To Start/Restart,
echo 1 >tracing_on;
-- Once tracing is diabled, disable_trace.sh script is triggered.
+- Once tracing is disabled, disable_trace.sh script is triggered.
+
+BREAKTRACE
+----------
+- Sends a break trace command when latency > USEC. This is a debugging option to control the
+  latency tracer in the realtime preemption patch. It is useful for tracking down unexpectedly
+  large latencies on a system. This option only works with the following kernel config options.
+
+For kernel < 2.6.24:
+
+* CONFIG_PREEMPT_RT=y
+* CONFIG_WAKEUP_TIMING=y
+* CONFIG_LATENCY_TRACE=y
+* CONFIG_CRITICAL_PREEMPT_TIMING=y
+* CONFIG_CRITICAL_IRQSOFF_TIMING=y
+
+For kernel >= 2.6.24:
+
+* CONFIG_PREEMPT_RT=y
+* CONFIG_FTRACE
+* CONFIG_IRQSOFF_TRACER=y
+* CONFIG_PREEMPT_TRACER=y
+* CONFIG_SCHED_TRACER=y
+* CONFIG_WAKEUP_LATENCY_HIST
+
+- With these kernel configuration options enabled, the USEC parameter to the -b option defines a
+  maximum latency value, which is compared against the actual latencies of the test. Once the
+  measured latency is higher than the given maximum, the kernel tracer and cyclictest are stopped.
+  The trace can be read from /proc/latency_trace. Please be aware that the tracer adds significant
+  overhead to the kernel, so the latencies will be much higher than on a kernel with latency
+  tracing disabled.
+
+- The breaktrace option enables tracing by default; tracing can be suppressed by using the
+  --notrace option.
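+
+As an illustration, a typical cyclictest invocation using breaktrace might look as follows (flags
+as documented by rt-tests; the 100 microsecond threshold is an arbitrary example):
+
+.. code:: bash
+
+   # stop cyclictest and the kernel tracer once a latency above 100 usec is measured
+   cyclictest -m -n -p 99 -l 100000 -b 100
+   # same run, with the automatic tracing suppressed
+   cyclictest -m -n -p 99 -l 100000 -b 100 --notrace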
+
+Post-execute scripts
+--------------------
+A post-execute script is added to the yardstick node context teardown to disable ftrace soon after
+cyclictest execution through yardstick completes. This option is implemented to collect only the
+ftrace logs required for effective debugging, if needed.
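+
+Conceptually, such a teardown step comes down to the ftrace controls introduced earlier; a minimal
+sketch is shown below (the project's actual disable_trace script, described next, may differ):
+
+.. code:: bash
+
+   TRACEDIR=/sys/kernel/debug/tracing
+   echo 0 > $TRACEDIR/tracing_on        # stop writes to the trace ring buffer
+   echo nop > $TRACEDIR/current_tracer  # remove the active tracer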
Details of disable_trace Script
-------------------------------
diff --git a/docs/release/userguide/common.platform.render.rst b/docs/release/userguide/common.platform.render.rst
index 46b4707a3..6487194d8 100644
--- a/docs/release/userguide/common.platform.render.rst
+++ b/docs/release/userguide/common.platform.render.rst
@@ -7,7 +7,7 @@ Using common platform components
================================
This section outlines basic usage principals and methods for some of the
-commonly deployed components of supported OPNFV scenario's in Danube.
+commonly deployed components of supported OPNFV scenarios in Euphrates.
The subsections provide an outline of how these components are commonly
used and how to address them in an OPNFV deployment. The components derive
from autonomous upstream communities and where possible this guide will
diff --git a/docs/release/userguide/feature.userguide.render.rst b/docs/release/userguide/feature.userguide.render.rst
index 3bed21fc9..e103f5f2d 100644
--- a/docs/release/userguide/feature.userguide.render.rst
+++ b/docs/release/userguide/feature.userguide.render.rst
@@ -3,7 +3,7 @@
.. http://creativecommons.org/licenses/by/4.0
==========================
-Using Danube Features
+Using Euphrates Features
==========================
The following sections of the user guide provide feature specific usage
diff --git a/docs/release/userguide/kvmfornfv.cyclictest-dashboard.userguide.rst b/docs/release/userguide/kvmfornfv.cyclictest-dashboard.userguide.rst
index c119b43c7..e5be012b6 100644
--- a/docs/release/userguide/kvmfornfv.cyclictest-dashboard.userguide.rst
+++ b/docs/release/userguide/kvmfornfv.cyclictest-dashboard.userguide.rst
@@ -96,10 +96,11 @@ Three type of dispatcher methods are available to store the cyclictest results.
- InfluxDB
- HTTP
-**1. File**: Default Dispatcher module is file. If the dispatcher module is configured as a file,then the test results are stored in a temporary file yardstick.out
-( default path: /tmp/yardstick.out).
-Dispatcher module of "Verify Job" is "Default". So,the results are stored in Yardstick.out file for verify job.
-Storing all the verify jobs in InfluxDB database causes redundancy of latency values. Hence, a File output format is prefered.
+**1. File**: The default dispatcher module is file. If the dispatcher module is configured as a file,
+then the test results are stored in a temporary file yardstick.out (default path: /tmp/yardstick.out).
+The dispatcher module of the "Verify Job" is "Default", so the results are stored in the yardstick.out
+file for the verify job. Storing all the verify jobs in the InfluxDB database causes redundancy of
+latency values. Hence, the File output format is preferred.
.. code:: bash
@@ -112,8 +113,9 @@ Storing all the verify jobs in InfluxDB database causes redundancy of latency va
max_bytes = 0
backup_count = 0
-**2. Influxdb**: If the dispatcher module is configured as influxdb, then the test results are stored in Influxdb.
-Users can check test resultsstored in the Influxdb(Database) on Grafana which is used to visualize the time series data.
+**2. Influxdb**: If the dispatcher module is configured as influxdb, then the test results are
+stored in InfluxDB. Users can check the test results stored in the InfluxDB database on Grafana,
+which is used to visualize the time series data.
To configure the influxdb, the following content in /etc/yardstick/yardstick.conf needs to be updated
@@ -130,9 +132,11 @@ To configure the influxdb, the following content in /etc/yardstick/yardstick.con
username = root
password = root
-Dispatcher module of "Daily Job" is Influxdb. So, the results are stored in influxdb and then published to Dashboard.
+The dispatcher module of the "Daily Job" is influxdb, so the results are stored in InfluxDB and then
+published to the Dashboard.
-**3. HTTP**: If the dispatcher module is configured as http, users can check test result on OPNFV testing dashboard which uses MongoDB as backend.
+**3. HTTP**: If the dispatcher module is configured as http, users can check test result on OPNFV
+testing dashboard which uses MongoDB as backend.
.. code:: bash
@@ -150,12 +154,14 @@ Dispatcher module of "Daily Job" is Influxdb. So, the results are stored in infl
Detailing the dispatcher module in verify and daily Jobs:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-KVM4NFV updates the dispatcher module in the yardstick configuration file(/etc/yardstick/yardstick.conf) depending on the Job type(Verify/Daily).
-Once the test is completed, results are published to the respective dispatcher modules.
+KVM4NFV updates the dispatcher module in the yardstick configuration file
+(/etc/yardstick/yardstick.conf) depending on the Job type (Verify/Daily). Once the test is completed,
+results are published to the respective dispatcher modules.
Dispatcher module is configured for each Job type as mentioned below.
-1. ``Verify Job`` : Default "DISPATCHER_TYPE" i.e. file(/tmp/yardstick.out) is used. User can also see the test results on Jenkins console log.
+1. ``Verify Job`` : The default "DISPATCHER_TYPE", i.e. file (/tmp/yardstick.out), is used. Users can
+   also see the test results in the Jenkins console log.
.. code:: bash
@@ -197,14 +203,17 @@ With the help of "influxdb_line_protocol", the json is transformed as a line str
-Influxdb api which is already implemented in `Influxdb`_ will post the data in line format into the database.
+The InfluxDB API, which is already implemented in `Influxdb`_, will post the data in line format into
+the database.
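+
+As an illustration, a write in InfluxDB line protocol looks like the following (the measurement,
+tag and field names here are assumptions for the example, not the exact schema used by yardstick):
+
+.. code:: bash
+
+   # line protocol: <measurement>,<tag>=<value> <field>=<value>,... <timestamp>
+   curl -i -XPOST 'http://localhost:8086/write?db=yardstick' --data-binary \
+     'kvmfornfv_cyclictest,testtype=idle_idle max=42,avg=13,min=3 1434055562000000000'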
``Displaying Results on Grafana dashboard:``
-- Once the test results are stored in Influxdb, dashboard configuration file(Json) which used to display the cyclictest results
-on Grafana need to be created by following the `Grafana-procedure`_ and then pushed into `yardstick-repo`_\
+- Once the test results are stored in InfluxDB, the dashboard configuration file (JSON), which is
+  used to display the cyclictest results on Grafana, needs to be created by following the
+  `Grafana-procedure`_ and then pushed into `yardstick-repo`_\
-- Grafana can be accessed at `Login`_ using credentials opnfv/opnfv and used for visualizing the collected test data as shown in `Visual`_\
+- Grafana can be accessed at `Login`_ using credentials opnfv/opnfv and used for visualizing the
+  collected test data as shown in `Visual`_\
.. figure:: images/Dashboard-screenshot-1.png
@@ -232,7 +241,8 @@ on Grafana need to be created by following the `Grafana-procedure`_ and then pus
Understanding Kvm4nfv Grafana Dashboard
---------------------------------------
-The Kvm4nfv dashboard found at http://testresults.opnfv.org/ currently supports graphical view of cyclictest. For viewing Kvm4nfv dashboarduse,
+The Kvm4nfv dashboard found at http://testresults.opnfv.org/ currently supports a graphical view of
+cyclictest. For viewing the Kvm4nfv dashboard use,
.. code:: bash
@@ -267,7 +277,8 @@ Note:
1. Idle-Idle Graph
~~~~~~~~~~~~~~~~~~~~
-`Idle-Idle`_ graph displays the Average, Maximum and Minimum latency values obtained by running Idle_Idle test-type of the cyclictest.
+`Idle-Idle`_ graph displays the Average, Maximum and Minimum latency values obtained by running
+Idle_Idle test-type of the cyclictest.
Idle_Idle implies that no stress is applied on the Host or the Guest.
.. _Idle-Idle: http://testresults.opnfv.org/grafana/dashboard/db/kvmfornfv-cyclictest?panelId=10&fullscreen
@@ -279,8 +290,9 @@ Idle_Idle implies that no stress is applied on the Host or the Guest.
2. CPU_Stress-Idle Graph
~~~~~~~~~~~~~~~~~~~~~~~~~
-`Cpu_Stress-Idle`_ graph displays the Average, Maximum and Minimum latency values obtained by running Cpu-stress_Idle test-type of the cyclictest.
-Cpu-stress_Idle implies that CPU stress is applied on the Host and no stress on the Guest.
+`Cpu_Stress-Idle`_ graph displays the Average, Maximum and Minimum latency values obtained by
+running Cpu-stress_Idle test-type of the cyclictest. Cpu-stress_Idle implies that CPU stress is
+applied on the Host and no stress on the Guest.
.. _Cpu_stress-Idle: http://testresults.opnfv.org/grafana/dashboard/db/kvmfornfv-cyclictest?panelId=11&fullscreen
@@ -291,8 +303,9 @@ Cpu-stress_Idle implies that CPU stress is applied on the Host and no stress on
3. Memory_Stress-Idle Graph
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-`Memory_Stress-Idle`_ graph displays the Average, Maximum and Minimum latency values obtained by running Memory-stress_Idle test-type of the Cyclictest.
-Memory-stress_Idle implies that Memory stress is applied on the Host and no stress on the Guest.
+`Memory_Stress-Idle`_ graph displays the Average, Maximum and Minimum latency values obtained by
+running Memory-stress_Idle test-type of the Cyclictest. Memory-stress_Idle implies that Memory
+stress is applied on the Host and no stress on the Guest.
.. _Memory_Stress-Idle: http://testresults.opnfv.org/grafana/dashboard/db/kvmfornfv-cyclictest?panelId=12&fullscreen
@@ -303,8 +316,9 @@ Memory-stress_Idle implies that Memory stress is applied on the Host and no stre
4. IO_Stress-Idle Graph
~~~~~~~~~~~~~~~~~~~~~~~~~
-`IO_Stress-Idle`_ graph displays the Average, Maximum and Minimum latency values obtained by running IO-stress_Idle test-type of the Cyclictest.
-IO-stress_Idle implies that IO stress is applied on the Host and no stress on the Guest.
+`IO_Stress-Idle`_ graph displays the Average, Maximum and Minimum latency values obtained by running
+IO-stress_Idle test-type of the Cyclictest. IO-stress_Idle implies that IO stress is applied on the
+Host and no stress on the Guest.
.. _IO_Stress-Idle: http://testresults.opnfv.org/grafana/dashboard/db/kvmfornfv-cyclictest?panelId=13&fullscreen
@@ -313,6 +327,46 @@ IO-stress_Idle implies that IO stress is applied on the Host and no stress on th
:width: 100%
:align: center
+Packet Forwarding Results
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Understanding Kvm4nfv Grafana Dashboard
+---------------------------------------
+
+The Kvm4nfv dashboard found at http://testresults.opnfv.org/grafana/ currently supports a graphical
+view of packet forwarding as well. For viewing the Kvm4nfv packet forwarding dashboard use,
+
+.. code:: bash
+
+ http://testresults.opnfv.org/grafana/dashboard/db/kvmfornfv-packet-forwarding
+
+ The login details are:
+
+ Username: opnfv
+ Password: opnfv
+
+
+.. code:: bash
+
+ The JSON of the KVMFORNFV-Packet-Forwarding dashboard can be found at:
+
+ $ git clone https://gerrit.opnfv.org/gerrit/yardstick.git
+ $ cd yardstick/dashboard
+ $ cat KVMFORNFV-Packet-Forwarding
+
+The dashboard has five tables, one for each specific packet forwarding test, with results for each
+frame size:
+
+- KVM4NFV-PHY2PHY-TPUT-OVS_WITH_DPDK_AND_VHOST_USER
+- KVM4NFV-PVP-TPUT-OVS_WITH_DPDK_AND_VHOST_USER
+- KVM4NFV-PVP-TPUT-SRIOV
+- KVM4NFV-PVVP-TPUT-OVS_WITH_DPDK_AND_VHOST_USER
+- KVM4NFV-PVPV-TPUT-OVS_WITH_DPDK_AND_VHOST_USER
+
+Note:
+
+- For all graphs, X-axis is marked with time stamps, Y-axis with value in microsecond units.
+
Future Scope
--------------
-The future work will include adding the kvmfornfv_Packet-forwarding test results into Grafana and influxdb.
+------------
+Future work will include adding new tables to the packet forwarding Grafana dashboard to publish the
+results of any new packet forwarding test cases that may be added.
diff --git a/docs/release/userguide/kvmfornfv_glossary.rst b/docs/release/userguide/kvmfornfv_glossary.rst
index aed5a971e..4dd731475 100644
--- a/docs/release/userguide/kvmfornfv_glossary.rst
+++ b/docs/release/userguide/kvmfornfv_glossary.rst
@@ -6,8 +6,8 @@
OPNFV Glossary
**************
-Danube 1.0
-------------
+Euphrates 1.0
+-------------
Contents
@@ -25,6 +25,11 @@ Arno
A river running through Tuscany and the name of the first OPNFV release.
+Apex
+
+ OPNFV Installation and Deployment tool based on the RDO Project's Triple-O
+ OpenStack installation tool.
+
API
Application Programming Interface
@@ -100,7 +105,7 @@ D
Danube
- Danube is the fourth release of OPNFV and also a river in Europe
+ A river in Europe and the name of the fourth OPNFV release.
Data plane
@@ -129,6 +134,15 @@ DSCP
--------
+E
+~
+
+Euphrates
+
+ The longest river of Western Asia and the name of the fifth OPNFV release.
+
+--------
+
F
~
diff --git a/docs/release/userguide/live_migration.userguide.rst b/docs/release/userguide/live_migration.userguide.rst
index 9fa9b82fd..ff075ac26 100644
--- a/docs/release/userguide/live_migration.userguide.rst
+++ b/docs/release/userguide/live_migration.userguide.rst
@@ -72,16 +72,83 @@ QEMU v2.4.0
Ethernet controller: Intel Corporation Ethernet Controller 10-Gigabit
X540-AT2 (rev 01)
+
+Vhost-user with OVS/DPDK as backend:
+
+The goal is to connect guests' virtio-net devices having a vhost-user backend to OVS dpdkvhostuser
+ports and be able to run any kind of network traffic between them.
+
+Installation of OVS and DPDK:
+
+Using vsperf, install OVS and DPDK. Prepare the directories:
+
+.. code:: bash
+
+ mkdir -p /var/run/openvswitch
+ mount -t hugetlbfs -o pagesize=2048k none /dev/hugepages
+
+Load Kernel modules
+
+.. code:: bash
+
+ modprobe openvswitch
+
+For OVS setup, clean the environment
+
+.. code:: bash
+
+ rm -f /usr/local/var/run/openvswitch/vhost-user*
+ rm -f /usr/local/etc/openvswitch/conf.db
+
+Start database server
+
+.. code:: bash
+
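+ # DB_SOCK is assumed to point at the OVSDB unix socket, e.g.:
+ # DB_SOCK=/usr/local/var/run/openvswitch/db.sock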
+ ovsdb-tool create /usr/local/etc/openvswitch/conf.db $VSPERF/src/ovs/ovs/vswitchd/vswitch.ovsschema
+ ovsdb-server --remote=punix:$DB_SOCK --remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
+
+Start OVS
+
+.. code:: bash
+
+ ovs-vsctl --no-wait init
+ ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0xf
+ ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=1024
+ ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
+
+Configure the bridge
+
+.. code:: bash
+
+ ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev
+ ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuser
+ ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuser
+
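+To verify that the bridge and both vhost-user ports were created (standard OVS CLI):
+
+.. code:: bash
+
+ ovs-vsctl show
+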
QEMU parameters:
::
-${qemu} -smp ${guest_cpus} -monitor unix:${qmp_sock},server,nowait -daemonize \
--cpu host,migratable=off,+invtsc,+tsc-deadline,pmu=off \
--realtime mlock=on -mem-prealloc -enable-kvm -m 1G \
--mem-path /mnt/hugetlbfs-1g \
--drive file=/root/minimal-centos1.qcow2,cache=none,aio=threads \
--netdev user,id=guest0,hostfwd=tcp:5555-:22 \
--device virtio-net-pci,netdev=guest0 \
--nographic -serial /dev/null -parallel /dev/null
+qemu-system-x86_64 -enable-kvm -cpu host -smp 2 \
+-chardev socket,id=char1,path=/usr/local/var/run/openvswitch/vhost-user1 \
+-netdev type=vhost-user,id=net1,chardev=char1,vhostforce \
+-device virtio-net-pci,netdev=net1,mac=52:54:00:12:34:56 \
+-chardev socket,id=char2,path=/usr/local/var/run/openvswitch/vhost-user2 \
+-netdev type=vhost-user,id=net2,chardev=char2,vhostforce \
+-device virtio-net-pci,netdev=net2,mac=54:54:00:12:34:56 -m 1024 -mem-path /dev/hugepages \
+-mem-prealloc -realtime mlock=on -monitor unix:/tmp/qmp-sock-src,server,nowait \
+-balloon virtio -drive file=/root/guest1.qcow2 -vnc :1 &
+
+Run the standby qemu with -incoming tcp:${incoming_ip}:${migrate_port}
+
+For local live migration
+
+.. code:: bash
+
+ incoming ip=0
+
+For peer-to-peer live migration
+
+.. code:: bash
+
+ incoming ip=dest_host
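+
+For example, the destination instance is started with the same command line as above, plus the
+-incoming flag (port 4444 matches the migrate command shown further below):
+
+.. code:: bash
+
+ # destination side: identical QEMU options as the source, with -incoming appended
+ qemu-system-x86_64 <same options as above> -incoming tcp:0:4444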
Network connection
@@ -90,6 +157,16 @@ Network connection
:alt: live migration network connection
:figwidth: 80%
+Commands for performing live migration:
+
+.. code:: bash
+
+ echo "migrate_set_speed 0" |nc -U /tmp/qmp-sock-src
+ echo "migrate_set_downtime 0.10" |nc -U /tmp/qmp-sock-src
+ echo "migrate -d tcp:0:4444" |nc -U /tmp/qmp-sock-src
+ #Wait till livemigration completed
+ echo "info migrate" | nc -U /tmp/qmp-sock-src
Test Result
-----------
diff --git a/docs/release/userguide/low_latency.userguide.rst b/docs/release/userguide/low_latency.userguide.rst
index f027b4939..e521a84c2 100644
--- a/docs/release/userguide/low_latency.userguide.rst
+++ b/docs/release/userguide/low_latency.userguide.rst
@@ -127,18 +127,18 @@ The above name signifies that,
Version Features
~~~~~~~~~~~~~~~~
-+-----------------------+------------------+-----------------+
-| **Test Name** | **Colorado** | **Danube** |
-| | | |
-+-----------------------+------------------+-----------------+
-| - Idle - Idle | ``Y`` | ``Y`` |
-+-----------------------+------------------+-----------------+
-| - Cpustress - Idle | | ``Y`` |
-+-----------------------+------------------+-----------------+
-| - Memorystress - Idle | | ``Y`` |
-+-----------------------+------------------+-----------------+
-| - IOstress - Idle | | ``Y`` |
-+-----------------------+------------------+-----------------+
++-----------------------+------------------+-----------------+-----------------+
+| **Test Name** | **Colorado** | **Danube** | **Euphrates** |
+| | | | |
++-----------------------+------------------+-----------------+-----------------+
+| - Idle - Idle | ``Y`` | ``Y`` | ``Y`` |
++-----------------------+------------------+-----------------+-----------------+
+| - Cpustress - Idle | | ``Y`` | ``Y`` |
++-----------------------+------------------+-----------------+-----------------+
+| - Memorystress - Idle | | ``Y`` | ``Y`` |
++-----------------------+------------------+-----------------+-----------------+
+| - IOstress - Idle | | ``Y`` | ``Y`` |
++-----------------------+------------------+-----------------+-----------------+
Idle-Idle test-type
@@ -165,8 +165,8 @@ Outputs Avg, Min and Max latency values.
Memory_Stress-Idle test-type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this type, the host is under memory stress where continuous memory operations are implemented to
-increase the Memory stress (Buffer stress).The cyclictest will run on the guest, where the guest is under
-no stress. It outputs Avg, Min and Max latency values.
+increase the Memory stress (Buffer stress). The cyclictest will run on the guest, where the guest is
+under no stress. It outputs Avg, Min and Max latency values.
.. figure:: images/memory-stress-idle-test-type.png
:name: memory-stress-idle test type
@@ -176,8 +176,8 @@ no stress. It outputs Avg, Min and Max latency values.
IO_Stress-Idle test-type
~~~~~~~~~~~~~~~~~~~~~~~~
The host is under constant Input/Output stress, i.e., multiple read-write operations are invoked to
-increase stress. Cyclictest will run on the guest VM that is launched on the same host, where the guest
-is under no stress. It outputs Avg, Min and Max latency values.
+increase stress. Cyclictest will run on the guest VM that is launched on the same host, where the
+guest is under no stress. It outputs Avg, Min and Max latency values.
.. figure:: images/io-stress-idle-test-type.png
:name: io-stress-idle test type
@@ -186,15 +186,15 @@ is under no stress. It outputs Avg, Min and Max latency values.
CPU_Stress-CPU_Stress test-type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Not implemented for Danube release.
+Not implemented for Euphrates release.
Memory_Stress-Memory_Stress test-type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Not implemented for Danube release.
+Not implemented for Euphrates release.
IO_Stress-IO_Stress test type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Not implemented for Danube release.
+Not implemented for Euphrates release.
2. Packet Forwarding Test cases
-------------------------------
@@ -203,9 +203,9 @@ to source after reaching its destination. This test case uses automated test-fra
OPNFV VSWITCHPERF project and a traffic generator (IXIA is used for kvm4nfv). Only latency results
generating test cases are triggered as a part of kvm4nfv daily job.
-Latency test measures the time required for a frame to travel from the originating device through the
-network to the destination device. Please note that RFC2544 Latency measurement will be superseded with
-a measurement of average latency over all successfully transferred packets or frames.
+Latency test measures the time required for a frame to travel from the originating device through
+the network to the destination device. Please note that RFC2544 Latency measurement will be
+superseded with a measurement of average latency over all successfully transferred packets or frames.
Packet forwarding test cases currently support the following test types:
diff --git a/docs/release/userguide/openstack.rst b/docs/release/userguide/openstack.rst
index 929d2ba42..c35535860 100644
--- a/docs/release/userguide/openstack.rst
+++ b/docs/release/userguide/openstack.rst
@@ -2,19 +2,19 @@
.. http://creativecommons.org/licenses/by/4.0
-============================
-Danube OpenStack User Guide
-============================
+==============================
+Euphrates OpenStack User Guide
+==============================
OpenStack is a cloud operating system developed and released by the
`OpenStack project <https://www.openstack.org>`_. OpenStack is used in OPNFV
for controlling pools of compute, storage, and networking resources in a Pharos
compliant infrastructure.
-OpenStack is used in Danube to manage tenants (known in OpenStack as
+OpenStack is used in Euphrates to manage tenants (known in OpenStack as
projects), users, services, images, flavours, and quotas across the Pharos
infrastructure. The OpenStack interface provides the primary interface for an
-operational Danube deployment and it is from the "horizon console" that an
+operational Euphrates deployment and it is from the "horizon console" that an
OPNFV user will perform the majority of administrative and operational
activities on the deployment.
@@ -26,7 +26,7 @@ details and descriptions of how to configure and interact with the OpenStack
deployment. This guide can be used by lab engineers and operators to tune the
OpenStack deployment to your liking.
-Once you have configured OpenStack to your purposes, or the Danube
+Once you have configured OpenStack to your purposes, or the Euphrates
deployment meets your needs as deployed, an operator, or administrator, will
find the best guidance for working with OpenStack in the
`OpenStack administration guide <http://docs.openstack.org/user-guide-admin>`_.
@@ -46,6 +46,6 @@ and enter the username and password:
password: admin
Other methods of interacting with and configuring OpenStack, like the REST API
-and CLI are also available in the Danube deployment, see the
+and CLI are also available in the Euphrates deployment, see the
`OpenStack administration guide <http://docs.openstack.org/user-guide-admin>`_
for more information on using those interfaces.
diff --git a/docs/release/userguide/packet_forwarding.userguide.rst b/docs/release/userguide/packet_forwarding.userguide.rst
index 31341a908..554d4efb9 100644
--- a/docs/release/userguide/packet_forwarding.userguide.rst
+++ b/docs/release/userguide/packet_forwarding.userguide.rst
@@ -33,6 +33,11 @@ Version Features
| | VSWITCHPERF software (PVP/PVVP) |
| | - Works with IXIA Traffic Generator |
+-----------------------------+---------------------------------------------------+
+| | - Test cases involving multiple guests (PVVP/PVPV)|
+| | included. |
+| Euphrates | - Implemented Yardstick Grafana dashboard to |
+| | publish results of packet forwarding test cases |
++-----------------------------+---------------------------------------------------+
VSPERF
------
@@ -90,7 +95,8 @@ environment and compilation of OVS, DPDK and QEMU is performed by
script **systems/build_base_machine.sh**. It should be executed under the
user account which will be used for vsperf execution.
- **Please Note:** Password-less sudo access must be configured for given user before script is executed.
+ **Please Note:** Password-less sudo access must be configured for the given user before the
+ script is executed.
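+
+One common way to grant such access is a sudoers drop-in (the account name ``vsperfuser`` below is
+an illustrative assumption; edit with visudo):
+
+.. code:: bash
+
+   # created via 'visudo -f /etc/sudoers.d/vsperf'
+   vsperfuser ALL=(ALL) NOPASSWD: ALL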
Execution of installation script:
@@ -123,9 +129,9 @@ For CentOS 7
## Python 3 Packages
-To avoid file permission errors and Python version issues, use virtualenv to create an isolated environment with Python3.
-The required Python 3 packages can be found in the `requirements.txt` file in the root of the test suite.
-They can be installed in your virtual environment like so:
+To avoid file permission errors and Python version issues, use virtualenv to create an isolated
+environment with Python 3. The required Python 3 packages can be found in the `requirements.txt`
+file in the root of the test suite. They can be installed in your virtual environment like so:
.. code:: bash
@@ -150,7 +156,8 @@ To activate, simple run:
Working Behind a Proxy
~~~~~~~~~~~~~~~~~~~~~~
-If you're behind a proxy, you'll likely want to configure this before running any of the above. For example:
+If you're behind a proxy, you'll likely want to configure this before running any of the above.
+For example:
.. code:: bash
@@ -164,12 +171,14 @@ If you're behind a proxy, you'll likely want to configure this before running an
For other OS specific activation click `this link`_:
-.. _this link: http://artifacts.opnfv.org/vswitchperf/colorado/configguide/installation.html#other-requirements
+.. _this link:
+   http://artifacts.opnfv.org/vswitchperf/colorado/configguide/installation.html#other-requirements
Traffic-Generators
------------------
-VSPERF supports many Traffic-generators. For configuring VSPERF to work with the available traffic-generator go through `this`_.
+VSPERF supports many traffic generators. To configure VSPERF to work with an available traffic
+generator, go through `this`_.
.. _this: http://artifacts.opnfv.org/vswitchperf/colorado/configguide/trafficgen.html
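+
+As a minimal sketch (the option name and values are assumed from the vsperf
+sample configs, not from this guide), the generator is selected in a custom
+conf file, e.g.:
+
+.. code-block:: python
+
+    # assumed vsperf option; 'IxNet' drives IxNetwork, 'Dummy' needs no real HW
+    TRAFFICGEN = 'IxNet'
+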
@@ -191,7 +200,8 @@ To see the list of traffic gens from the cli:
This guide provides the details of how to install
and configure the various traffic generators.
-As KVM4NFV uses only IXIA traffic generator, it is discussed here. For complete documentation regarding traffic generators please follow this `link`_.
+As KVM4NFV uses only the IXIA traffic generator, it is discussed here. For complete documentation
+regarding traffic generators, please follow this `link`_.
.. _link: https://gerrit.opnfv.org/gerrit/gitweb?p=vswitchperf.git;a=blob;f=docs/configguide/trafficgen.rst;h=85fc35b886d30db3b92a6b7dcce7ca742b70cbdc;hb=HEAD
@@ -201,8 +211,8 @@ IXIA Setup
Hardware Requirements
~~~~~~~~~~~~~~~~~~~~~
-VSPERF requires the following hardware to run tests: IXIA traffic generator (IxNetwork), a machine that
-runs the IXIA client software and a CentOS Linux release 7.1.1503 (Core) host.
+VSPERF requires the following hardware to run tests: an IXIA traffic generator (IxNetwork), a
+machine that runs the IXIA client software, and a CentOS Linux release 7.1.1503 (Core) host.
Installation
~~~~~~~~~~~~
@@ -217,11 +227,13 @@ You need to install IxNetworkTclClient$(VER_NUM)Linux.bin.tgz.
On the IXIA client software system
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Find the IxNetwork TCL server app (start -> All Programs -> IXIA -> IxNetwork -> IxNetwork_$(VER_NUM) -> IxNetwork TCL Server)
+Find the IxNetwork TCL server app
+ - (start -> All Programs -> IXIA -> IxNetwork -> IxNetwork_$(VER_NUM) -> IxNetwork TCL Server)
- Right click on IxNetwork TCL Server, select properties
- Under shortcut tab in the Target dialogue box make sure there is the argument "-tclport xxxx"
-where xxxx is your port number (take note of this port number you will need it for the 10_custom.conf file).
+where xxxx is your port number (take note of this port number; you will need it for the
+10_custom.conf file).
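+
+As a hedged sketch (option names assumed from the vsperf sample configuration,
+not from this guide), the noted port number would then land in the
+10_custom.conf file roughly as:
+
+.. code-block:: python
+
+    # assumed vsperf options for reaching the IxNetwork TCL server
+    TRAFFICGEN_IXNET_MACHINE = '10.10.10.10'  # hypothetical client machine IP
+    TRAFFICGEN_IXNET_PORT = '9127'            # the '-tclport xxxx' value noted above
+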
.. figure:: images/IXIA1.png
:name: IXIA1 setup
@@ -336,11 +348,12 @@ To delete a src subdirectory and its contents to allow you to re-clone simply us
Configure the `./conf/10_custom.conf` file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The supplied `10_custom.conf` file must be modified, as it contains configuration items for which there are no reasonable default values.
+The supplied `10_custom.conf` file must be modified, as it contains configuration items for which
+there are no reasonable default values.
-The configuration items that can be added is not limited to the initial contents. Any configuration item
-mentioned in any .conf file in `./conf` directory can be added and that item will be overridden by the custom
-configuration value.
+The configuration items that can be added are not limited to the initial contents. Any
+configuration item mentioned in any .conf file in the `./conf` directory can be added, and the
+custom value will override the default.
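+
+As a minimal sketch (the option names below are assumptions chosen only for
+illustration; any other item from the default configs could be used the same way):
+
+.. code-block:: python
+
+    # 10_custom.conf: repeat any item from ./conf to override its default value
+    VSWITCH_BRIDGE_NAME = 'custom_br0'  # hypothetical override of the bridge name
+    RTE_TARGET = 'x86_64-native-linuxapp-gcc'  # assumed DPDK build target option
+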
Using a custom settings file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -381,7 +394,9 @@ IP addresses. l2fwd can be found in <vswitchperf_dir>/src/l2fwd
Executing tests
~~~~~~~~~~~~~~~~
-Before running any tests make sure you have root permissions by adding the following line to /etc/sudoers:
+Before running any tests, make sure you have root permissions by adding the following line to
+/etc/sudoers:
+
.. code:: bash
username ALL=(ALL) NOPASSWD: ALL
@@ -408,7 +423,8 @@ To run all tests:
./vsperf --conf-file=user_settings.py
-Some tests allow for configurable parameters, including test duration (in seconds) as well as packet sizes (in bytes).
+Some tests allow for configurable parameters, including test duration (in seconds) as well as packet
+sizes (in bytes).
.. code:: bash
@@ -467,7 +483,7 @@ In case, that VSPERF is executed in "trafficgen" mode, then configuration
of the traffic generator can be modified through the ``TRAFFIC`` dictionary passed to the
``--test-params`` option. It is not necessary to specify all values of the ``TRAFFIC``
dictionary; it is sufficient to specify only the values which should be changed.
-Detailed description of ``TRAFFIC`` dictionary can be found at: ref:`configuration-of-traffic-dictionary`.
+Detailed notes on the ``TRAFFIC`` dictionary can be found at
+:ref:`configuration-of-traffic-dictionary`.
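+
+As a hedged illustration (key names assumed from the ``TRAFFIC`` defaults in the
+``conf/`` directory), a partial override lists only the keys to be changed:
+
+.. code-block:: python
+
+    # all TRAFFIC keys that are not listed here keep their default values
+    TRAFFIC = {
+        'traffic_type': 'rfc2544_continuous',  # assumed key/value pair
+        'frame_rate': 50,                      # assumed: percent of line rate
+    }
+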
Example of execution of VSPERF in "trafficgen" mode:
@@ -499,8 +515,8 @@ please refer to figure.2
:width: 100%
:align: center
-Packet Forwarding Guest Scenario
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Packet Forwarding Guest Scenario (PXP Deployment)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here the guest is a Virtual Machine (VM) launched using vloop_vnf, provided by the vsperf project,
on the host/DUT using Qemu. In this latency test the time taken by the frame/packet to travel from the
@@ -512,6 +528,173 @@ The resulting latency values will define the performance of installed kernel.
:width: 100%
:align: center
+Every testcase uses one of the supported deployment scenarios to set up the test environment.
+The controller responsible for a given scenario configures flows in the vswitch to route
+traffic among physical interfaces connected to the traffic generator and virtual
+machines. VSPERF supports several deployments, including the PXP deployment, which can
+set up various scenarios with multiple VMs.
+
+These scenarios are realized by the VswitchControllerPXP class, which can configure and
+execute a given number of VMs in serial or parallel configurations. Every VM can be
+configured with just one or an even number of interfaces. If a VM has more than
+2 interfaces, traffic is routed among pairs of interfaces.
+
+Example of traffic routing for a VM with 4 NICs in a serial configuration:
+
+.. code-block:: console
+
+ +------------------------------------------+
+ | VM with 4 NICs |
+ | +---------------+ +---------------+ |
+ | | Application | | Application | |
+ | +---------------+ +---------------+ |
+ | ^ | ^ | |
+ | | v | v |
+ | +---------------+ +---------------+ |
+ | | logical ports | | logical ports | |
+ | | 0 1 | | 2 3 | |
+ +--+---------------+----+---------------+--+
+ ^ : ^ :
+ | | | |
+ : v : v
+ +-----------+---------------+----+---------------+----------+
+ | vSwitch | 0 1 | | 2 3 | |
+ | | logical ports | | logical ports | |
+ | previous +---------------+ +---------------+ next |
+ | VM or PHY ^ | ^ | VM or PHY|
+ | port -----+ +------------+ +---> port |
+ +-----------------------------------------------------------+
+
+
+It is also possible to define a different number of interfaces for each VM to better
+simulate real scenarios.
+
+The number of VMs involved in the test and the type of their connection is defined
+by the deployment name as follows:
+
 * ``pvvp[number]`` - configures a scenario with VMs connected in series, with an
   optional ``number`` of VMs. If ``number`` is not specified, then
   2 VMs will be used.
+
+ Example of 2 VMs in a serial configuration:
+
+ .. code-block:: console
+
+ +----------------------+ +----------------------+
+ | 1st VM | | 2nd VM |
+ | +---------------+ | | +---------------+ |
+ | | Application | | | | Application | |
+ | +---------------+ | | +---------------+ |
+ | ^ | | | ^ | |
+ | | v | | | v |
+ | +---------------+ | | +---------------+ |
+ | | logical ports | | | | logical ports | |
+ | | 0 1 | | | | 0 1 | |
+ +---+---------------+--+ +---+---------------+--+
+ ^ : ^ :
+ | | | |
+ : v : v
+ +---+---------------+---------+---------------+--+
+ | | 0 1 | | 3 4 | |
+ | | logical ports | vSwitch | logical ports | |
+ | +---------------+ +---------------+ |
+ | ^ | ^ | |
+ | | +-----------------+ v |
+ | +----------------------------------------+ |
+ | | physical ports | |
+ | | 0 1 | |
+ +---+----------------------------------------+---+
+ ^ :
+ | |
+ : v
+ +------------------------------------------------+
+ | |
+ | traffic generator |
+ | |
+ +------------------------------------------------+
+
+* ``pvpv[number]`` - configures a scenario with VMs connected in parallel, with an
+  optional ``number`` of VMs. If ``number`` is not specified, then
+  2 VMs will be used. The multistream feature is used to route traffic to particular
+  VMs (or to the NIC pairs of every VM). This means that VSPERF will enable the
+  multistream feature and set the number of streams to the number of VMs and their
+  NIC pairs. Traffic will be dispatched based on Stream Type, i.e. by UDP port,
+  IP address or MAC address.
+
+ Example of 2 VMs in a parallel configuration, where traffic is dispatched
+ based on the UDP port.
+
+ .. code-block:: console
+
+ +----------------------+ +----------------------+
+ | 1st VM | | 2nd VM |
+ | +---------------+ | | +---------------+ |
+ | | Application | | | | Application | |
+ | +---------------+ | | +---------------+ |
+ | ^ | | | ^ | |
+ | | v | | | v |
+ | +---------------+ | | +---------------+ |
+ | | logical ports | | | | logical ports | |
+ | | 0 1 | | | | 0 1 | |
+ +---+---------------+--+ +---+---------------+--+
+ ^ : ^ :
+ | | | |
+ : v : v
+ +---+---------------+---------+---------------+--+
+ | | 0 1 | | 3 4 | |
+ | | logical ports | vSwitch | logical ports | |
+ | +---------------+ +---------------+ |
+ | ^ | ^ : |
+ | | ......................: : |
+ | UDP | UDP : | : |
+ | port| port: +--------------------+ : |
+ | 0 | 1 : | : |
+ | | : v v |
+ | +----------------------------------------+ |
+ | | physical ports | |
+ | | 0 1 | |
+ +---+----------------------------------------+---+
+ ^ :
+ | |
+ : v
+ +------------------------------------------------+
+ | |
+ | traffic generator |
+ | |
+ +------------------------------------------------+
+
+
+The PXP deployment is backward compatible with the PVP deployment, where ``pvp`` is
+an alias for ``pvvp1`` and executes just one VM.
+
+The number of interfaces used by VMs is defined by the configuration option
+``GUEST_NICS_NR``. If more than one pair of interfaces is defined
+for a VM, then:
+
 * for the ``pvvp`` (serial) scenario, every NIC pair is connected in series
   before the connection to the next VM is created
 * for the ``pvpv`` (parallel) scenario, every NIC pair is directly connected
   to the physical ports and a unique traffic stream is assigned to it
+
+Examples:
+
 * Deployment ``pvvp10`` will start 10 VMs and connect them in series
 * Deployment ``pvpv4`` will start 4 VMs and connect them in parallel
 * Deployment ``pvpv1`` with GUEST_NICS_NR = [4] will start 1 VM with
   4 interfaces, and every NIC pair is directly connected to the
   physical ports
 * Deployment ``pvvp`` with GUEST_NICS_NR = [2, 4] will start 2 VMs;
   the 1st VM will have 2 interfaces and the 2nd VM 4 interfaces. These
   interfaces will be connected in series, i.e. traffic will flow as
   follows (see the sketch after this list):
   PHY1 -> VM1_1 -> VM1_2 -> VM2_1 -> VM2_2 -> VM2_3 -> VM2_4 -> PHY2
+
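+As a sketch combining the last example above with options named elsewhere in
+this guide, such a setup could be configured as:
+
+.. code-block:: python
+
+    # deployment 'pvvp': 1st VM gets 2 NICs, 2nd VM gets 4 NICs
+    GUEST_NICS_NR = [2, 4]
+    # 'testpmd' per VM, since a VM with more than 2 NICs needs testpmd forwarding
+    GUEST_LOOPBACK = ['testpmd', 'testpmd']
+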
+Note: If only 1, or more than 2, NICs are configured for a VM,
+then ``testpmd`` should be used as the forwarding application inside the VM,
+as it is able to forward traffic between multiple VM NIC pairs.
+
+Note: In the case of ``linux_bridge``, all NICs are connected to the same
+bridge inside the VM.
+
Packet Forwarding SRIOV Scenario
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -622,12 +805,92 @@ PCI passthrough support.
Note: Qemu with PCI passthrough support can be used only with PVP test
deployment.
+Guest Core and Thread Binding
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+VSPERF provides options to achieve better performance through guest core binding
+and guest vCPU thread binding. Core binding pins all of the QEMU threads.
+Thread binding pins the housekeeping threads to one set of CPUs and the vCPU
+threads to another, which helps to reduce the noise from the QEMU housekeeping
+threads.
+
+
+.. code-block:: python
+
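+    # assumed reading: #VMINDEX expands per VM, so VM0 binds cores 6 and 7, VM1 cores 8 and 9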
+ GUEST_CORE_BINDING = [('#EVAL(6+2*#VMINDEX)', '#EVAL(7+2*#VMINDEX)')]
+
+**NOTE:** By default GUEST_THREAD_BINDING will be none, which means it is the same
+as GUEST_CORE_BINDING, i.e. the vCPU threads share the physical CPUs with
+the housekeeping threads. Better performance using vCPU thread binding can be
+achieved by enabling affinity in the custom configuration file.
+
+For example, if an environment requires cores 28 and 29 to be core-bound and
+cores 30 and 31 to be used for guest thread binding, the following settings
+achieve better performance:
+
+.. code-block:: python
+
+ VNF_AFFINITIZATION_ON = True
+ GUEST_CORE_BINDING = [('28','29')]
+ GUEST_THREAD_BINDING = [('30', '31')]
+
+Qemu CPU features
+^^^^^^^^^^^^^^^^^
+
+QEMU defaults to a compatible subset of performance-enhancing CPU features.
+To pass all available host processor features to the guest, set:
+
+.. code-block:: python
+
+ GUEST_CPU_OPTIONS = ['host,migratable=off']
+
+**NOTE:** To enhance performance, CPU features such as the TSC deadline timer,
+the guest PMU and the invariant TSC can be provided to the guest via the custom
+configuration file.
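+
+As a hedged sketch (the QEMU feature names below are assumptions to be checked
+against the QEMU documentation for the host in use), such features could be
+requested as:
+
+.. code-block:: python
+
+    # assumed '-cpu' flags: +invtsc = invariant TSC, pmu=on = guest PMU
+    GUEST_CPU_OPTIONS = ['host,migratable=off,+invtsc,pmu=on']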
+
+Selection of loopback application for tests with VMs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To select the loopback applications which will forward packets inside VMs,
+the following parameter should be configured:
+
+.. code-block:: python
+
+ GUEST_LOOPBACK = ['testpmd']
+
+or use ``--test-params`` CLI argument:
+
+.. code-block:: console
+
+ $ ./vsperf --conf-file=<path_to_custom_conf>/10_custom.conf \
+ --test-params "GUEST_LOOPBACK=['testpmd']"
+
+Supported loopback applications are:
+
+.. code-block:: console
+
+ 'testpmd' - testpmd from dpdk will be built and used
+ 'l2fwd' - l2fwd module provided by Huawei will be built and used
+ 'linux_bridge' - linux bridge will be configured
+ 'buildin' - nothing will be configured by vsperf; VM image must
+ ensure traffic forwarding between its interfaces
+
+A guest loopback application must be configured; otherwise traffic
+will not be forwarded by the VM and testcases with VM-related deployments
+will fail. The guest loopback application is set to 'testpmd' by default.
+
+**NOTE:** If only 1, or more than 2, NICs are configured for a VM,
+then 'testpmd' should be used, as it is able to forward traffic between
+multiple VM NIC pairs.
+
+**NOTE:** In the case of linux_bridge, all guest NICs are connected to the same
+bridge inside the guest.
+
Results
~~~~~~~
-The results for the packet forwarding test cases are uploaded to artifacts.
-The link for the same can be found below
+The results for the packet forwarding test cases are uploaded to artifacts and
+also published on the Yardstick Grafana dashboard.
+The links for both can be found below:
.. code:: bash
http://artifacts.opnfv.org/kvmfornfv.html
+ http://testresults.opnfv.org/KVMFORNFV-Packet-Forwarding
diff --git a/tests/vsperf.conf b/tests/vsperf.conf
index a5055e770..ac5505261 100755
--- a/tests/vsperf.conf
+++ b/tests/vsperf.conf
@@ -26,7 +26,14 @@ WHITELIST_NICS = ['02:00.0', '02:00.1']
##############################
# VNF configuration
##############################
-GUEST_IMAGE = ['/home/jenkins/vloop-vnf-ubuntu-14.04_20160804.qcow2']
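+# two guest images, one per VM, for the multi-guest (PVVP/PVPV) test cases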
+GUEST_IMAGE = ['/home/jenkins/vloop-vnf-ubuntu-14.04_20160804.qcow2','/home/jenkins/vloop-vnf-ubuntu-14.04_20160823.qcow2']
+GUEST_SMP = ['2']
+GUEST_CORE_BINDING = [('27','28'),('29','30')]
+GUEST_THREAD_BINDING = [('33','34'),('35','36')]
+VNF_AFFINITIZATION_ON = True
+# loopback application for tests with VMs
+GUEST_LOOPBACK = ['testpmd', 'testpmd']
+TESTPMD_FWD_MODE = 'io'
#Using kvmfornfv built qemu for launching guest vms.
PATHS['qemu'] = {
'type' : 'bin',
diff --git a/tests/vsperf.conf.sriov b/tests/vsperf.conf.sriov
index c37414c1b..f476848b7 100755
--- a/tests/vsperf.conf.sriov
+++ b/tests/vsperf.conf.sriov
@@ -28,6 +28,13 @@ PATHS['dpdk']['src']['modules'] = ['uio', 'vfio-pci']
# VNF configuration
#############################
GUEST_IMAGE = ['/home/jenkins/vloop-vnf-ubuntu-14.04_20160804.qcow2']
+GUEST_SMP = ['2']
+GUEST_CORE_BINDING = [('27','28'),('29','30')]
+GUEST_THREAD_BINDING = [('33','34'),('35','36')]
+VNF_AFFINITIZATION_ON = True
+# loopback application for tests with VMs
+GUEST_LOOPBACK = ['testpmd', 'testpmd']
+TESTPMD_FWD_MODE = 'io'
#Using kvmfornfv built qemu for launching guest vms.
PATHS['qemu'] = {
'type' : 'bin',