-rw-r--r--  INFO.yaml                                                    |  20
-rw-r--r--  docs/release/configguide/Auto-featureconfig.rst              | 241
-rw-r--r--  docs/release/configguide/Auto-postinstall.rst                |  28
-rw-r--r--  docs/release/configguide/auto-OPFNV-fuel.jpg                 | bin 0 -> 189899 bytes
-rw-r--r--  docs/release/configguide/auto-installTarget-generic.jpg      | bin 0 -> 154476 bytes
-rw-r--r--  docs/release/configguide/auto-installTarget-initial.jpg      | bin 0 -> 118641 bytes
-rw-r--r--  docs/release/configguide/auto-repo-folders.jpg               | bin 0 -> 162411 bytes
-rw-r--r--  docs/release/configguide/index.rst                           |  17
-rw-r--r--  docs/release/installation/UC01-feature.userguide.rst         |  84
-rw-r--r--  docs/release/installation/UC01-installation.instruction.rst  | 212
-rw-r--r--  docs/release/installation/UC02-feature.userguide.rst         | 145
-rw-r--r--  docs/release/installation/UC02-installation.instruction.rst  | 195
-rw-r--r--  docs/release/installation/UC03-feature.userguide.rst         | 100
-rw-r--r--  docs/release/installation/UC03-installation.instruction.rst  | 212
-rw-r--r--  docs/release/installation/index.rst                          |  15
-rw-r--r--  docs/release/release-notes/Auto-release-notes.rst            |  78
-rw-r--r--  docs/release/release-notes/auto-project-activities.png       | bin 0 -> 55670 bytes
-rw-r--r--  docs/release/release-notes/index.rst                         |   2
-rw-r--r--  docs/release/userguide/UC01-feature.userguide.rst            |   6
-rw-r--r--  docs/release/userguide/UC02-feature.userguide.rst            |  49
-rw-r--r--  docs/release/userguide/UC03-feature.userguide.rst            |  53
-rw-r--r--  docs/release/userguide/auto-UC02-TC-mapping.png              | bin 0 -> 48301 bytes
-rw-r--r--  docs/release/userguide/auto-UC02-cardinalities.png           | bin 0 -> 36684 bytes
-rw-r--r--  docs/release/userguide/auto-UC02-data1.jpg                   | bin 122920 -> 51570 bytes
-rw-r--r--  docs/release/userguide/auto-UC02-data2.jpg                   | bin 378585 -> 217832 bytes
-rw-r--r--  docs/release/userguide/auto-UC02-data3.jpg                   | bin 462367 -> 274235 bytes
-rw-r--r--  docs/release/userguide/auto-UC02-logic.png                   | bin 0 -> 39141 bytes
-rw-r--r--  docs/release/userguide/auto-UC03-TC-archit.png               | bin 0 -> 47579 bytes
-rw-r--r--  docs/release/userguide/auto-UC03-TestCases.png               | bin 0 -> 20920 bytes
-rw-r--r--  docs/release/userguide/index.rst                             |   2
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilItfCloud.py            | 150
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilMain.py                |   1
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilMgTestDef.py           | 120
-rw-r--r--  lib/auto/testcase/resiliency/clouds.yaml                     |  42
34 files changed, 669 insertions(+), 1103 deletions(-)
diff --git a/INFO.yaml b/INFO.yaml
index b3f0c09..aee9a7b 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -23,21 +23,17 @@ realtime_discussion:
server: 'freenode.net'
channel: '#opnfv-auto'
meetings:
- - type: 'gotomeeting+irc'
- agenda: # eg: 'https://wiki.opnfv.org/display/'
- url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ - type: 'zoom+irc'
+ agenda: 'https://wiki.opnfv.org/display/AUTO/Auto+Project+Meetings'
+ url: 'https://zoom.us/j/2362828999'
server: 'freenode.net'
- channel: '#opnfv-meeting'
- repeats: 'weekly'
- time: # eg: '16:00 UTC'
+ channel: '#opnfv-auto'
+ repeats: 'weekly, mondays'
+ time: '14:00 UTC'
repositories:
- 'auto'
committers:
- <<: *opnfv_auto_ptl
- - name: 'Aric Gardner'
- email: 'agardner@linuxfoundation.org'
- company: 'linuxfoundation.org'
- id: 'agardner'
- name: 'Harry Huang'
email: 'huangxiangyu5@huawei.com'
company: 'huawei.com'
@@ -54,6 +50,10 @@ committers:
email: 'oul.gd@chinatelecom.cn'
company: 'chinatelecom.cn'
id: 'ouliang1'
+ - name: 'Gerard Damm'
+ email: 'gerard.damm@wipro.com'
+ company: 'Wipro'
+ id: 'gerard_damm'
tsc:
# yamllint disable rule:line-length
approval: 'http//meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html'
diff --git a/docs/release/configguide/Auto-featureconfig.rst b/docs/release/configguide/Auto-featureconfig.rst
new file mode 100644
index 0000000..4e9705f
--- /dev/null
+++ b/docs/release/configguide/Auto-featureconfig.rst
@@ -0,0 +1,241 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+Introduction
+============
+
+This document describes the software and hardware reference frameworks used by Auto,
+and provides guidelines on how to perform configurations and additional installations.
+
+
+Goal
+====
+
+The goal of `Auto <http://docs.opnfv.org/en/latest/release/release-notes.html>`_ installation and configuration is to prepare
+an environment where the `Auto use cases <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/userguide/index.html#auto-userguide>`_
+can be assessed, i.e. where the corresponding test cases can be executed and their results can be collected.
+
+An instance of ONAP needs to be present, as well as a number of deployed VNFs, as required by the use cases.
+
+The initial Auto use cases cover:
+
+* Edge Cloud (increased autonomy and automation for managing Edge VNFs)
+* Resilience Improvements through ONAP (reduced recovery time for VNFs and end-to-end services in case of failure or suboptimal performance)
+* Enterprise vCPE (automation, cost optimization, and performance assurance of enterprise connectivity to Data Centers and the Internet)
+
+The general idea of Auto is to install an OPNFV environment (comprising at least one Cloud Manager),
+an ONAP instance, ONAP-deployed VNFs as required by use cases, possibly additional cloud managers not
+already installed during the OPNFV environment setup, traffic generators, and the Auto-specific software
+for the use cases (which can include test frameworks such as `Robot <http://robotframework.org/>`_ or `Functest <http://docs.opnfv.org/en/latest/submodules/functest/docs/release/release-notes/index.html#functest-releasenotes>`_).
+The ONAP instance needs to be configured with policies and closed-loop controls (also as required by use cases),
+and the test framework controls the execution and result collection of all the test cases.
+
+The following diagram illustrates two execution environments, for x86 architectures and for Arm architectures.
+The installation process depends on the underlying architecture, since certain components may require a
+specific binary-compatible version for a given x86 or Arm architecture. The preferred variant of ONAP is one
+that runs on Kubernetes, while all VNF types are of interest to Auto: VM-based or containerized (on any cloud
+manager), for x86 or for Arm. The initial VM-based VNFs will cover OpenStack, and in future versions,
+additional cloud managers will be considered. The configuration of ONAP and of test cases should not depend
+on the architecture.
+
+.. image:: auto-installTarget-generic.jpg
+
+
+For each component, various installer tools will be selected (based on simplicity and performance), and
+may change from one Auto release to the next. For example, the most natural installer for ONAP should be
+OOM (ONAP Operations Manager).
+
+The initial version of Auto will focus on OpenStack VM-based VNFs, onboarded and deployed via ONAP API
+(not by ONAP GUI, for the purpose of automation). ONAP is installed on Kubernetes. Two servers from LaaS
+are used: one to support an OpenStack instance as provided by the OPNFV installation via Fuel/MCP, and
+the other to support ONAP with Kubernetes and Docker. Therefore, the VNF execution environment is the
+server with the OpenStack instance.
+
+.. image:: auto-installTarget-initial.jpg
+
+
+Jenkins will be used for Continuous Integration in OPNFV releases, to ensure that the latest master
+branch of Auto is always working.
+
+Moreover, Auto will offer an API, which can be imported as a module, and can be accessed for example
+by a web application. The following diagram shows the planned structure for the Auto Git repository,
+supporting this module, as well as the installation scripts, test case software, utilities, and documentation.
+
+.. image:: auto-repo-folders.jpg
+
+
+
+Pre-configuration activities
+============================
+
+The following resources will be required for the initial version of Auto:
+
+* two LaaS (OPNFV Lab-as-a-Service) pods, with their associated network information. Later, other types of target pods will be supported.
+* the `Auto Git repository <https://git.opnfv.org/auto/tree/>`_ (clone from `Gerrit Auto <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_); an example clone command is shown below
+
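+To retrieve the Auto repository, clone it over HTTPS from the OPNFV cgit server
+(a sketch; anonymous HTTPS cloning is assumed to be enabled):
+
+.. code-block:: console
+
+   git clone https://git.opnfv.org/auto
+   cd auto
+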
+
+
+Hardware configuration
+======================
+
+<TBC>
+
+
+
+Feature configuration
+=====================
+
+Environment installation
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Current Auto work in progress is captured in the `Auto Lab Deployment wiki page <https://wiki.opnfv.org/display/AUTO/Auto+Lab+Deployment>`_.
+
+
+OPNFV with OpenStack
+~~~~~~~~~~~~~~~~~~~~
+
+The Auto installation uses the Fuel/MCP installer for the OPNFV environment (see the
+`OPNFV download page <https://www.opnfv.org/software/downloads>`_).
+
+The following figure summarizes the two installation cases: virtual or bare metal.
+This OPNFV installer starts by installing a Salt Master, which then configures
+subnets and bridges, and installs VMs (e.g., for controllers and compute nodes)
+and an OpenStack instance with predefined credentials.
+
+.. image:: auto-OPFNV-fuel.jpg
+
+
+The Auto version of the OPNFV installation configures additional resources for the OpenStack virtual pod,
+as compared to the default installation. Examples of manual steps are as follows:
+
+.. code-block:: console
+
+ 1. mkdir /opt/fuel
+ 2. cd /opt/fuel
+ 3. git clone https://git.opnfv.org/fuel
+ 4. cd fuel
+ 5. vi /opt/fuel/fuel/mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+
+
+These lines can be added to configure more resources (the ``gtw01`` entry already exists
+and is shown for context; the ``cmp01`` and ``cmp02`` entries are new):
+
+.. code-block:: yaml
+
+   gtw01:
+     ram: 2048
+   cmp01:
+     vcpus: 16
+     ram: 65536
+     disk: 40
+   cmp02:
+     vcpus: 16
+     ram: 65536
+     disk: 40
+
+
+The final step deploys OpenStack (duration: approximately 30 to 45 minutes).
+
+.. code-block:: console
+
+ 6. ci/deploy.sh -l UNH-LaaS -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+
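+Once the deployment completes, a quick sanity check can be run from the Salt Master node
+(a sketch, assuming the default Fuel/MCP minion naming for controller, compute, and gateway nodes):
+
+.. code-block:: console
+
+   salt '*' test.ping    # every minion (ctl*, cmp*, gtw*) should respond
+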
+
+
+ONAP on Kubernetes
+~~~~~~~~~~~~~~~~~~
+
+An ONAP installation on OpenStack has also been investigated, but we focus here on
+the ONAP on Kubernetes version.
+
+The initial focus is on x86 architectures. For a while, the ONAP DCAE component was not
+operational on Kubernetes, and had to be installed separately on OpenStack. The ONAP instance
+was therefore a hybrid, with all components except DCAE running on Kubernetes, and DCAE
+running separately on OpenStack.
+
+For Arm architectures, specialized Docker images are being developed to provide Arm architecture
+binary compatibility.
+
+The goal for the first release of Auto is to use an ONAP instance where DCAE also runs on Kubernetes,
+for both architectures.
+
+The ONAP reference for this installation is detailed `here <https://wiki.onap.org/display/DW/ONAP+on+Kubernetes>`_.
+
+Examples of manual steps for the deploy procedure are as follows:
+
+.. code-block:: console
+
+ 1 git clone https://gerrit.onap.org/r/oom
+ 2 cd oom
+ 3 git pull https://gerrit.onap.org/r/oom refs/changes/19/32019/6
+ 4 cd install/rancher
+ 5 ./oom_rancher_setup.sh -b master -s <your external ip> -e onap
+ 6 cd oom/kubernetes/config
+ 7 (modify onap-parameters.yaml for VIM connection (manual))
+ 8 ./createConfig.sh -n onap
+ 9 cd ../oneclick
+ 10 ./createAll.bash -n onap
+
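+After ``createAll.bash`` finishes, the ONAP pods can be monitored with standard Kubernetes
+commands until they all reach the Running state (a sketch, assuming ``kubectl`` is configured
+on the Rancher host):
+
+.. code-block:: console
+
+   kubectl get pods --all-namespaces   # all ONAP pods should eventually be Running
+   kubectl cluster-info                # confirms the Kubernetes API server is reachable
+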
+
+
+ONAP configuration
+^^^^^^^^^^^^^^^^^^
+
+This section describes the logical steps performed by the Auto scripts to prepare ONAP and VNFs.
+
+
+VNF deployment
+~~~~~~~~~~~~~~
+
+<TBC; pre-onboarding, onboarding, deployment>
+
+
+Policy and closed-loop control configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+<TBC>
+
+
+Traffic Generator configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+<TBC>
+
+
+
+Test Case software installation and execution control
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+<TBC>
+
+
+
+Installation health-check
+=========================
+
+<TBC; the Auto installation will self-check, but indicate here manual steps to double-check that the installation was successful>
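+
+While this section is to be completed, typical manual spot-checks for the initial target
+environment might look like the following sketch (assuming configured OpenStack and
+Kubernetes admin clients):
+
+.. code-block:: console
+
+   openstack service list                               # core OpenStack services should be listed
+   kubectl get pods --all-namespaces | grep -v Running  # shows any ONAP pod that is not yet healthy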
+
+
+
+
+References
+==========
+
+Auto Wiki pages:
+
+* `Auto wiki main page <https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095>`_
+* `Auto Lab Deployment wiki page <https://wiki.opnfv.org/display/AUTO/Auto+Lab+Deployment>`_
+
+
+OPNFV documentation on Auto:
+
+* `Auto release notes <http://docs.opnfv.org/en/latest/release/release-notes.html>`_
+* `Auto use case user guides <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/userguide/index.html#auto-userguide>`_
+
+
+Git & Gerrit Auto repositories:
+
+* `Auto Git repository <https://git.opnfv.org/auto/tree/>`_
+* `Gerrit for Auto project <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_
+
diff --git a/docs/release/configguide/Auto-postinstall.rst b/docs/release/configguide/Auto-postinstall.rst
new file mode 100644
index 0000000..500a99d
--- /dev/null
+++ b/docs/release/configguide/Auto-postinstall.rst
@@ -0,0 +1,28 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+********************************
+Auto Post Installation Procedure
+********************************
+
+<TBC; normally, the installation is self-contained and there should be no need for post-installation manual steps;
+possibly input for CI toolchain and deployment pipeline in first section>
+
+
+Automated post installation activities
+======================================
+<TBC if needed>
+
+
+<Project> post configuration procedures
+=======================================
+<TBC if needed>
+
+
+Platform components validation
+==============================
+<TBC if needed>
+
diff --git a/docs/release/configguide/auto-OPFNV-fuel.jpg b/docs/release/configguide/auto-OPFNV-fuel.jpg
new file mode 100644
index 0000000..706d997
--- /dev/null
+++ b/docs/release/configguide/auto-OPFNV-fuel.jpg
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-generic.jpg b/docs/release/configguide/auto-installTarget-generic.jpg
new file mode 100644
index 0000000..3f94871
--- /dev/null
+++ b/docs/release/configguide/auto-installTarget-generic.jpg
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-initial.jpg b/docs/release/configguide/auto-installTarget-initial.jpg
new file mode 100644
index 0000000..edc6509
--- /dev/null
+++ b/docs/release/configguide/auto-installTarget-initial.jpg
Binary files differ
diff --git a/docs/release/configguide/auto-repo-folders.jpg b/docs/release/configguide/auto-repo-folders.jpg
new file mode 100644
index 0000000..ee88866
--- /dev/null
+++ b/docs/release/configguide/auto-repo-folders.jpg
Binary files differ
diff --git a/docs/release/configguide/index.rst b/docs/release/configguide/index.rst
new file mode 100644
index 0000000..ba1a3da
--- /dev/null
+++ b/docs/release/configguide/index.rst
@@ -0,0 +1,17 @@
+.. _auto-configguide:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*****************************************************
+OPNFV Auto (ONAP-Automated OPNFV) Configuration Guide
+*****************************************************
+
+.. toctree::
+ :numbered:
+ :maxdepth: 3
+
+ Auto-featureconfig.rst
+ Auto-postinstall.rst
diff --git a/docs/release/installation/UC01-feature.userguide.rst b/docs/release/installation/UC01-feature.userguide.rst
deleted file mode 100644
index 5da0865..0000000
--- a/docs/release/installation/UC01-feature.userguide.rst
+++ /dev/null
@@ -1,84 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-
-================================================================
-Auto User Guide: Use Case 1 Edge Cloud
-================================================================
-
-This document provides the user guide for Fraser release of Auto,
-specifically for Use Case 1: Edge Cloud.
-
-.. contents::
- :depth: 3
- :local:
-
-
-Description
-===========
-
-This use case aims at showcasing the benefits of using ONAP for autonomous Edge Cloud management.
-
-A high level of automation of VNF lifecycle event handling after launch is enabled by ONAP policies
-and closed-loop controls, which take care of most lifecycle events (start, stop, scale up/down/in/out,
-recovery/migration for HA) as well as their monitoring and SLA management.
-
-Multiple types of VNFs, for different execution environments, are first approved in the catalog thanks
-to the onboarding process, and then can be deployed and handled by multiple controllers in a systematic way.
-
-This results in management efficiency (lower control/automation overhead) and high degree of autonomy.
-
-
-Preconditions:
-#. hardware environment in which Edge cloud may be deployed
-#. an Edge cloud has been deployed and is ready for operation
-#. ONAP has been deployed onto a Cloud, and is interfaced (i.e. provisioned for API access) to the Edge cloud
-
-
-
-Main Success Scenarios:
-
-* lifecycle management - stop, stop, scale (dependent upon telemetry)
-
-* recovering from faults (detect, determine appropriate response, act); i.e. exercise closed-loop policy engine in ONAP
-
- * verify mechanics of control plane interaction
-
-* collection of telemetry for machine learning
-
-
-Details on the test cases corresponding to this use case:
-
-* Environment check
-
- * Basic environment check: Create test script to check basic VIM (OpenStack), ONAP, and VNF are up and running
-
-* VNF lifecycle management
-
- * VNF Instance Management: Validation of VNF Instance Management which includes VNF instantiation, VNF State Management and termination
-
- * Tacker Monitoring Driver (VNFMonitorPing):
-
- * Write Tacker Monitor driver to handle monitor_call and based on return state value create custom events
- * If Ping to VNF fails, trigger below events
-
- * Event 1 : Collect failure logs from VNF
- * Event 2 : Soft restart/respawn the VNF
-
- * Integrate with Telemetry
-
- * Create TOSCA template policies to implement ceilometer data collection service
- * Collect CPU utilization data, compare with threshold, and perform action accordingly (respawn, scale-in/scale-out)
-
-
-
-Test execution high-level description
-=====================================
-
-<TBC>
-
-
-
-
diff --git a/docs/release/installation/UC01-installation.instruction.rst b/docs/release/installation/UC01-installation.instruction.rst
deleted file mode 100644
index 9ecb8bd..0000000
--- a/docs/release/installation/UC01-installation.instruction.rst
+++ /dev/null
@@ -1,212 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-========
-Abstract
-========
-
-This document describes how to install OPNFV Auto Use Case 1: Edge Cloud, its dependencies and required system resources.
-
-.. contents::
- :depth: 3
- :local:
-
-Version history
----------------------
-
-+--------------------+--------------------+--------------------+--------------------+
-| **Date** | **Ver.** | **Author** | **Comment** |
-| | | | |
-+--------------------+--------------------+--------------------+--------------------+
-| 2015-04-14 | 0.1.0 | Jonas Bjurel | First draft |
-| | | | |
-+--------------------+--------------------+--------------------+--------------------+
-| | 0.1.1 | | |
-| | | | |
-+--------------------+--------------------+--------------------+--------------------+
-| | 1.0 | | |
-| | | | |
-| | | | |
-+--------------------+--------------------+--------------------+--------------------+
-
-
-Introduction
-============
-<INTRODUCTION TO THE SCOPE AND INTENTION OF THIS DOCUMENT AS WELL AS TO THE SYSTEM TO BE INSTALLED>
-
-<EXAMPLE>:
-
-This document describes the supported software and hardware configurations for the
-Fuel OPNFV reference platform as well as providing guidelines on how to install and
-configure such reference system.
-
-Although the available installation options gives a high degree of freedom in how the system is set-up,
-with what architecture, services and features, etc., not nearly all of those permutations provides
-a OPNFV compliant reference architecture. Following the guidelines in this document ensures
-a result that is OPNFV compliant.
-
-The audience of this document is assumed to have good knowledge in network and Unix/Linux administration.
-
-
-Preface
-=======
-<DESCRIBE NEEDED PREREQUISITES, PLANNING, ETC.>
-
-<EXAMPLE>:
-
-Before starting the installation of Fuel@OPNFV, some planning must preceed.
-
-First of all, the Fuel@OPNFV .iso image needs to be retrieved,
-the Latest stable Arno release of Fuel@OPNFV can be found here: <www.opnfv.org/abc/def>
-
-Alternatively, you may build the .iso from source by cloning the opnfv/genesis git repository:
-<git clone https://<linux foundation uid>@gerrit.opnf.org/gerrit/genesis>
-Check-out the Arno release:
-<cd genesis; git checkout arno>
-Goto the fuel directory and build the .iso
-<cd fuel/build; make all>
-
-Familiarize yourself with the Fuel 6.0.1 version by reading the following documents:
-- abc <http://wiki.openstack.org/abc>
-- def <http://wiki.openstack.org/def>
-- ghi <http://wiki.openstack.org/ghi>
-
-Secondly, a number of deployment specific parameters must be collected, those are:
-
-1. Provider sub-net and gateway information
-
-2. Provider VLAN information
-
-3. Provider DNS addresses
-
-4. Provider NTP addresses
-
-This information will be needed for the configuration procedures provided in this document.
-
-
-Hardware requirements
-=====================
-<PROVIDE A LIST OF MINIMUM HARDWARE REQUIREMENTS NEEDED FOR THE INSTALL>
-
-<EXAMPLE>:
-
-Following minimum hardware requirements must be met for installation of Fuel@OPNFV:
-
-+--------------------+----------------------------------------------------+
-| **HW Aspect** | **Requirement** |
-| | |
-+--------------------+----------------------------------------------------+
-| **# of servers** | Minimum 5 (3 for non redundant deployment) |
-| | 1 Fuel deployment master (may be virtualized) |
-| | 3(1) Controllers |
-| | 1 Compute |
-+--------------------+----------------------------------------------------+
-| **CPU** | Minimum 1 socket x86_AMD64 Ivy bridge 1.6 GHz |
-| | |
-+--------------------+----------------------------------------------------+
-| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
-| | |
-+--------------------+----------------------------------------------------+
-| **Disk** | Minimum 256GB 10kRPM spinning disks |
-| | |
-+--------------------+----------------------------------------------------+
-| **NICs** | 2(1)x10GE Niantec for Private/Public (Redundant) |
-| | |
-| | 2(1)x10GE Niantec for SAN (Redundant) |
-| | |
-| | 2(1)x1GE for admin (PXE) and control (RabitMQ,etc) |
-| | |
-+--------------------+----------------------------------------------------+
-
-
-Top of the rack (TOR) Configuration requirements
-================================================
-<DESCRIBE NEEDED NETWORK TOPOLOGY SETUP IN THE TORs>
-
-<EXAMPLE>:
-
-The switching infrastructure provides connectivity for the OPNFV infra-structure operations as well as
-for the tenant networks (East/West) and provider connectivity (North/South bound connectivity).
-The switching connectivity can (but does not need to) be fully redundant,
-in case it and comprises a redundant 10GE switch pair for "Traffic/Payload/SAN" purposes as well as
-a 1GE switch pair for "infrastructure control-, management and administration"
-
-The switches are **not** automatically configured from the OPNFV reference platform.
-All the networks involved in the OPNFV infra-structure as well as the provider networks
-and the private tenant VLANs needs to be manually configured.
-
-This following sections guides through required black-box switch configurations.
-
-VLAN considerations and blue-print
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-IP Address plan considerations and blue-print
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-
-OPNFV Software installation and deployment
-==========================================
-<DESCRIBE THE FULL PROCEDURES FOR THE INSTALLATION OF THE OPNFV COMPONENT INSTALLATION AND DEPLOYMENT>
-
-<EXAMPLE>:
-
-This section describes the installation of the Fuel@OPNFV installation server (Fuel master)
-as well as the deployment of the full OPNFV reference platform stack across a server cluster.
-Etc.
-
-Install Fuel master
-^^^^^^^^^^^^^^^^^^^^^
-
-Create an OPNV (Fuel Environment)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Configure the OPNFV environment
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Deploy the OPNFV environment
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-
-Installation health-check
-=========================
-<DESCRIBE ANY MEANS TO DO VERIFY THE INTEGRITY AND HEALTHYNESS OF THE INSTALL>
-
-<EXAMPLE>:
-
-Now that the OPNFV environment has been created, and before the post installation configurations is started,
-perform a system health check from the Fuel GUI:
-
-- Select the "Health check" TAB.
-- Select all test-cases
-- And click "Run tests"
-
-All test cases except the following should pass:
-
-Post installation and deployment actions
-------------------------------------------
-<DESCRIBE ANY POST INSTALLATION ACTIONS/CONFIGURATIONS NEEDED>
-
-<EXAMPLE>:
-After the OPNFV deployment is completed, the following manual changes needs to be performed in order
-for the system to work according OPNFV standards.
-
-**Change host OS password:**
-Change the Host OS password by......
-
-
-References
-==========
-<PROVIDE NEEDED/USEFUL REFERENCES>
-
-<EXAMPLES>:
-
-OPNFV
-^^^^^^^^^^
-
-OpenStack
-^^^^^^^^^^^
-
-OpenDaylight
-^^^^^^^^^^^^^^^
diff --git a/docs/release/installation/UC02-feature.userguide.rst b/docs/release/installation/UC02-feature.userguide.rst
deleted file mode 100644
index 32a6df8..0000000
--- a/docs/release/installation/UC02-feature.userguide.rst
+++ /dev/null
@@ -1,145 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-
-================================================================
-Auto User Guide: Use Case 2 Resiliency Improvements Through ONAP
-================================================================
-
-This document provides the user guide for Fraser release of Auto,
-specifically for Use Case 2: Resiliency Improvements Through ONAP.
-
-.. contents::
- :depth: 3
- :local:
-
-
-Description
-===========
-
-This use case illustrates VNF failure recovery time reduction with ONAP, thanks to its automated monitoring and management.
-It simulates an underlying problem (failure, stress, etc.: any adverse condition in the network that can impact VNFs),
-tracks a VNF, and measures the amount of time it takes for ONAP to restore the VNF functionality.
-
-The benefit for NFV edge service providers is to assess what degree of added VIM+NFVI platform resilience for VNFs is obtained by
-leveraging ONAP closed-loop control, vs. VIM+NFVI self-managed resilience (which may not be aware of the VNF or the corresponding
-end-to-end Service, but only of underlying resources such as VMs and servers).
-
-
-Preconditions:
-
-#. hardware environment in which Edge cloud may be deployed
-#. Edge cloud has been deployed and is ready for operation
-#. ONAP has been deployed onto a cloud and is interfaced (i.e. provisioned for API access) to the Edge cloud
-#. Components of ONAP have been deployed on the Edge cloud as necessary for specific test objectives
-
-In future releases, Auto Use cases will also include the deployment of ONAP (if not already installed), the deployment
-of test VNFs (pre-existing VNFs in pre-existing ONAP can be used in the test as well), the configuration of ONAP for
-monitoring these VNFs (policies, CLAMP, DCAE), in addition to the test scripts which simulate a problem and measures recovery time.
-
-Different types of problems can be simulated, hence the identification of multiple test cases corresponding to this use case,
-as illustrated in this diagram:
-
-.. image:: auto-UC02-testcases.jpg
-
-Description of simulated problems/challenges:
-
-* Physical Infra Failure
-
- * Migration upon host failure: Compute host power is interrupted, and affected workloads are migrated to other available hosts.
- * Migration upon disk failure: Disk volumes are unmounted, and affected workloads are migrated to other available hosts.
- * Migration upon link failure: Traffic on links is interrupted/corrupted, and affected workloads are migrated to other available hosts.
- * Migration upon NIC failure: NIC ports are disabled by host commands, and affected workloads are migrated to other available hosts.
-
-* Virtual Infra Failure
-
- * OpenStack compute host service fail: Core OpenStack service processes on compute hosts are terminated, and auto-restored, or affected workloads are migrated to other available hosts.
- * SDNC service fail: Core SDNC service processes are terminated, and auto-restored.
- * OVS fail: OVS bridges are disabled, and affected workloads are migrated to other available hosts.
- * etc.
-
-* Security
-
- * Host tampering: Host tampering is detected, the host is fenced, and affected workloads are migrated to other available hosts.
- * Host intrusion: Host intrusion attempts are detected, an offending workload, device, or flow is identified and fenced, and as needed affected workloads are migrated to other available hosts.
- * Network intrusion: Network intrusion attempts are detected, and an offending flow is identified and fenced.
-
-
-
-
-Test execution high-level description
-=====================================
-
-The following two MSCs (Message Sequence Charts) show the actors and high-level interactions.
-
-The first MSC shows the preparation activities (assuming the hardware, network, cloud, and ONAP have already been installed):
-onboarding and deployment of VNFs (via ONAP portal and modules in sequence: SDC, VID, SO), and ONAP configuration
-(policy framework, closed-loops in CLAMP, activation of DCAE).
-
-.. image:: auto-UC02-preparation.jpg
-
-The second MSC illustrates the pattern of all test cases for the Resiliency Improvements:
-* simulate the chosen problem (a.k.a. a "Challenge") for this test case, for example suspend a VM which may be used by a VNF
-* start tracking the target VNF of this test case
-* measure the ONAP-orchestrated VNF Recovery Time
-* then the test stops simulating the problem (for example: resume the VM that was suspended),
-
-In parallel, the MSC also shows the sequence of events happening in ONAP, thanks to its configuration to provide Service
-Assurance for the VNF.
-
-.. image:: auto-UC02-pattern.jpg
-
-
-Test design: data model, implementation modules
-===============================================
-
-The high-level design of classes shows the identification of several entities:
-* Test Case: as identified above, each is a special case of the overall use case (e.g., categorized by challenge type)
-* Test Definition: gathers all the information necessary to run a certain test case
-* Metric Definition: describes a certain metric that may be measured, in addition to Recovery Time
-* Challenge Definition: describe the challenge (problem, failure, stress, ...) simulated by the test case
-* Recipient: entity that can receive commands and send responses, and that is queried by the Test Definition or Challenge Definition
-(a recipient would be typically a management service, with interfaces (CLI or API) for clients to query)
-* Resources: with 3 types (VNF, cloud virtual resource such as a VM, physical resource such as a server)
-
-Three of these entities have execution-time corresponding classes:
-* Test Execution, which captures all the relevant data of the execution of a Test Definition
-* Challenge Execution, which captures all the relevant data of the execution of a Challenge Definition
-* Metric Value, which captures the a quantitative measurement of a Metric Definition (with a timestamp)
-
-.. image:: auto-UC02-data1.jpg
-
-The following diagram illustrates an implementation-independent design of the attributes of these entities:
-.. image:: auto-UC02-data2.jpg
-
-This next diagram shows the Python classes and attributes, as implemented by this Use Case (for all test cases):
-
-.. image:: auto-UC02-data3.jpg
-
-Test definition data is stored in serialization files (Python pickles), while test execution data is stored in CSV
-files, for easier post-analysis.
-
-The module design is straightforward: functions and classes for managing data, for interfacing with recipients,
-for executing tests, and for interacting with the test user (choosing a Test Definition, showing the details
-of a Test Definition, starting the execution).
-
-.. image:: auto-UC02-module1.jpg
-
-This last diagram shows the test user menu functions:
-
-.. image:: auto-UC02-module2.jpg
-
-In future releases of Auto, testing environments such as FuncTest and Yardstick might be leveraged.
-
-Also, anonymized test results could be collected from users willing to share them, and aggregates could be
-maintained as benchmarks.
-
-
-
-
-
-
-
-
diff --git a/docs/release/installation/UC02-installation.instruction.rst b/docs/release/installation/UC02-installation.instruction.rst
deleted file mode 100644
index 0e126dd..0000000
--- a/docs/release/installation/UC02-installation.instruction.rst
+++ /dev/null
@@ -1,195 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-========
-Abstract
-========
-
-This document describes how to install OPNFV Auto Use Case 2: Resiliency Improvements Through ONAP, its dependencies and required system resources.
-
-.. contents::
- :depth: 3
- :local:
-
-
-
-Introduction
-============
-<INTRODUCTION TO THE SCOPE AND INTENTION OF THIS DOCUMENT AS WELL AS TO THE SYSTEM TO BE INSTALLED>
-
-<EXAMPLE>:
-
-This document describes the supported software and hardware configurations for the
-Fuel OPNFV reference platform as well as providing guidelines on how to install and
-configure such reference system.
-
-Although the available installation options gives a high degree of freedom in how the system is set-up,
-with what architecture, services and features, etc., not nearly all of those permutations provides
-a OPNFV compliant reference architecture. Following the guidelines in this document ensures
-a result that is OPNFV compliant.
-
-The audience of this document is assumed to have good knowledge in network and Unix/Linux administration.
-
-
-Preface
-=======
-<DESCRIBE NEEDED PREREQUISITES, PLANNING, ETC.>
-
-<EXAMPLE>:
-
-Before starting the installation of Fuel@OPNFV, some planning must preceed.
-
-First of all, the Fuel@OPNFV .iso image needs to be retrieved,
-the Latest stable Arno release of Fuel@OPNFV can be found here: <www.opnfv.org/abc/def>
-
-Alternatively, you may build the .iso from source by cloning the opnfv/genesis git repository:
-<git clone https://<linux foundation uid>@gerrit.opnf.org/gerrit/genesis>
-Check-out the Arno release:
-<cd genesis; git checkout arno>
-Goto the fuel directory and build the .iso
-<cd fuel/build; make all>
-
-Familiarize yourself with the Fuel 6.0.1 version by reading the following documents:
-- abc <http://wiki.openstack.org/abc>
-- def <http://wiki.openstack.org/def>
-- ghi <http://wiki.openstack.org/ghi>
-
-Secondly, a number of deployment specific parameters must be collected, those are:
-
-1. Provider sub-net and gateway information
-
-2. Provider VLAN information
-
-3. Provider DNS addresses
-
-4. Provider NTP addresses
-
-This information will be needed for the configuration procedures provided in this document.
-
-
-Hardware requirements
-=====================
-<PROVIDE A LIST OF MINIMUM HARDWARE REQUIREMENTS NEEDED FOR THE INSTALL>
-
-<EXAMPLE>:
-
-Following minimum hardware requirements must be met for installation of Fuel@OPNFV:
-
-+--------------------+----------------------------------------------------+
-| **HW Aspect** | **Requirement** |
-| | |
-+--------------------+----------------------------------------------------+
-| **# of servers** | Minimum 5 (3 for non redundant deployment) |
-| | 1 Fuel deployment master (may be virtualized) |
-| | 3(1) Controllers |
-| | 1 Compute |
-+--------------------+----------------------------------------------------+
-| **CPU** | Minimum 1 socket x86_AMD64 Ivy bridge 1.6 GHz |
-| | |
-+--------------------+----------------------------------------------------+
-| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
-| | |
-+--------------------+----------------------------------------------------+
-| **Disk** | Minimum 256GB 10kRPM spinning disks |
-| | |
-+--------------------+----------------------------------------------------+
-| **NICs** | 2(1)x10GE Niantec for Private/Public (Redundant) |
-| | |
-| | 2(1)x10GE Niantec for SAN (Redundant) |
-| | |
-| | 2(1)x1GE for admin (PXE) and control (RabitMQ,etc) |
-| | |
-+--------------------+----------------------------------------------------+
-
-
-Top of the rack (TOR) Configuration requirements
-================================================
-<DESCRIBE NEEDED NETWORK TOPOLOGY SETUP IN THE TORs>
-
-<EXAMPLE>:
-
-The switching infrastructure provides connectivity for the OPNFV infra-structure operations as well as
-for the tenant networks (East/West) and provider connectivity (North/South bound connectivity).
-The switching connectivity can (but does not need to) be fully redundant,
-in case it and comprises a redundant 10GE switch pair for "Traffic/Payload/SAN" purposes as well as
-a 1GE switch pair for "infrastructure control-, management and administration"
-
-The switches are **not** automatically configured from the OPNFV reference platform.
-All the networks involved in the OPNFV infra-structure as well as the provider networks
-and the private tenant VLANs needs to be manually configured.
-
-This following sections guides through required black-box switch configurations.
-
-VLAN considerations and blue-print
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-IP Address plan considerations and blue-print
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-
-OPNFV Software installation and deployment
-==========================================
-<DESCRIBE THE FULL PROCEDURES FOR THE INSTALLATION OF THE OPNFV COMPONENT INSTALLATION AND DEPLOYMENT>
-
-<EXAMPLE>:
-
-This section describes the installation of the Fuel@OPNFV installation server (Fuel master)
-as well as the deployment of the full OPNFV reference platform stack across a server cluster.
-Etc.
-
-Install Fuel master
-^^^^^^^^^^^^^^^^^^^^^
-
-Create an OPNV (Fuel Environment)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Configure the OPNFV environment
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Deploy the OPNFV environment
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-
-Installation health-check
-=========================
-<DESCRIBE ANY MEANS TO DO VERIFY THE INTEGRITY AND HEALTHYNESS OF THE INSTALL>
-
-<EXAMPLE>:
-
-Now that the OPNFV environment has been created, and before the post installation configurations is started,
-perform a system health check from the Fuel GUI:
-
-- Select the "Health check" TAB.
-- Select all test-cases
-- And click "Run tests"
-
-All test cases except the following should pass:
-
-Post installation and deployment actions
-------------------------------------------
-<DESCRIBE ANY POST INSTALLATION ACTIONS/CONFIGURATIONS NEEDED>
-
-<EXAMPLE>:
-After the OPNFV deployment is completed, the following manual changes needs to be performed in order
-for the system to work according OPNFV standards.
-
-**Change host OS password:**
-Change the Host OS password by......
-
-
-References
-==========
-<PROVIDE NEEDED/USEFUL REFERENCES>
-
-<EXAMPLES>:
-
-OPNFV
-^^^^^^^^^^
-
-OpenStack
-^^^^^^^^^^^
-
-OpenDaylight
-^^^^^^^^^^^^^^^
diff --git a/docs/release/installation/UC03-feature.userguide.rst b/docs/release/installation/UC03-feature.userguide.rst
deleted file mode 100644
index 354d052..0000000
--- a/docs/release/installation/UC03-feature.userguide.rst
+++ /dev/null
@@ -1,100 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-
-================================================================
-Auto User Guide: Use Case 3 Enterprise vCPE
-================================================================
-
-This document provides the user guide for Fraser release of Auto,
-specifically for Use Case 3: Enterprise vCPE.
-
-.. contents::
- :depth: 3
- :local:
-
-
-Description
-===========
-
-This Use Case shows how ONAP can help ensuring that virtual CPEs (including vFW: virtual firewalls) in Edge Cloud are enterprise-grade.
-
-ONAP operations include a verification process for VNF onboarding (i.e. inclusion in the ONAP catalog),
-with multiple Roles (designer, tester, governor, operator), responsible for approving proposed VNFs
-(as VSPs (Vendor Software Products), and eventually as end-to-end Services).
-
-This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are only
-chosen from such an approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet
-enterprise-grade requirements. ONAP provides a NBI in addition to a standard portal, thus enabling
-a programmatic deployment of VNFs, still conforming to ONAP processes.
-
-Moreover, ONAP also comprises real-time monitoring (by the DCAE component), which monitors performance for SLAs,
-can adjust allocated resources accordingly (elastic adjustment at VNF level), and can ensure High Availability.
-
-DCAE executes directives coming from policies described in the Policy Framework, and closed-loop controls
-described in the CLAMP component.
-
-Finally, this automated approach also reduces costs, since repetitive actions are designed once and executed multiple times,
-as vCPEs are instantiated and decommissioned (frequent events, given the variability of business activity,
-and a Small Business market similar to the Residential market: many contract updates resulting in many vCPE changes).
-
-NFV edge service providers need to provide site2site, site2dc (Data Center) and site2internet services to tenants
-both efficiently and safely, by deploying such qualified enterprise-grade vCPE.
-
-
-Preconditions:
-
-#. hardware environment in which Edge cloud may be deployed
-#. an Edge cloud has been deployed and is ready for operation
-#. enterprise edge devices, such as ThinCPE, have access to the Edge cloud with WAN interfaces
-#. ONAP components (MSO, SDN-C, APP-C and VNFM) have been deployed onto a cloud and are interfaced (i.e. provisioned for API access) to the Edge cloud
-
-
-Main Success Scenarios:
-
-* VNF spin-up
-
- * vCPE spin-up: MSO calls the VNFM to spin up a vCPE instance from the catalog and then updates the active VNF list
- * vFW spin-up: MSO calls the VNFM to spin up a vFW instance from the catalog and then updates the active VNF list
-
-* site2site
-
- * L3VPN service subscribing: MSO calls the SDNC to create VXLAN tunnels to carry L2 traffic between client's ThinCPE and SP's vCPE, and enables vCPE to route between different sites.
- * L3VPN service unsubscribing: MSO calls the SDNC to destroy tunnels and routes, thus disable traffic between different sites.
-
-
-See `ONAP description of vCPE use case <https://wiki.onap.org/display/DW/Use+Case+proposal%3A+Enterprise+vCPE>`_ for more details, including MSCs.
-
-
-Details on the test cases corresponding to this use case:
-
-* VNF Management
-
- * Spin up a vCPE instance: Spin up a vCPE instance, by calling NBI of the orchestrator.
- * Spin up a vFW instance: Spin up a vFW instance, by calling NBI of the orchestrator.
-
-* VPN as a Service
- * Subscribe to a VPN service: Subscribe to a VPN service, by calling NBI of the orchestrator.
- * Unsubscribe to a VPN service: Unsubscribe to a VPN service, by calling NBI of the orchestrator.
-
-* Internet as a Service
-
- * Subscribe to an Internet service: Subscribe to an Internet service, by calling NBI of the orchestrator.
- * Unsubscribe to an Internet service: Unsubscribe to an Internet service, by calling NBI of the orchestrator.
-
-
-Test execution high-level description
-=====================================
-
-<TBC>
-
-
-
-
-
-
-
-
-
diff --git a/docs/release/installation/UC03-installation.instruction.rst b/docs/release/installation/UC03-installation.instruction.rst
deleted file mode 100644
index 0221885..0000000
--- a/docs/release/installation/UC03-installation.instruction.rst
+++ /dev/null
@@ -1,212 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-========
-Abstract
-========
-
-This document describes how to install OPNFV Auto Use Case 3: Enterprise vCPE, its dependencies and required system resources.
-
-.. contents::
- :depth: 3
- :local:
-
-Version history
----------------------
-
-+--------------------+--------------------+--------------------+--------------------+
-| **Date** | **Ver.** | **Author** | **Comment** |
-| | | | |
-+--------------------+--------------------+--------------------+--------------------+
-| 2015-04-14 | 0.1.0 | Jonas Bjurel | First draft |
-| | | | |
-+--------------------+--------------------+--------------------+--------------------+
-| | 0.1.1 | | |
-| | | | |
-+--------------------+--------------------+--------------------+--------------------+
-| | 1.0 | | |
-| | | | |
-| | | | |
-+--------------------+--------------------+--------------------+--------------------+
-
-
-Introduction
-============
-<INTRODUCTION TO THE SCOPE AND INTENTION OF THIS DOCUMENT AS WELL AS TO THE SYSTEM TO BE INSTALLED>
-
-<EXAMPLE>:
-
-This document describes the supported software and hardware configurations for the
-Fuel OPNFV reference platform as well as providing guidelines on how to install and
-configure such reference system.
-
-Although the available installation options gives a high degree of freedom in how the system is set-up,
-with what architecture, services and features, etc., not nearly all of those permutations provides
-a OPNFV compliant reference architecture. Following the guidelines in this document ensures
-a result that is OPNFV compliant.
-
-The audience of this document is assumed to have good knowledge in network and Unix/Linux administration.
-
-
-Preface
-=======
-<DESCRIBE NEEDED PREREQUISITES, PLANNING, ETC.>
-
-<EXAMPLE>:
-
-Before starting the installation of Fuel@OPNFV, some planning must preceed.
-
-First of all, the Fuel@OPNFV .iso image needs to be retrieved,
-the Latest stable Arno release of Fuel@OPNFV can be found here: <www.opnfv.org/abc/def>
-
-Alternatively, you may build the .iso from source by cloning the opnfv/genesis git repository:
-<git clone https://<linux foundation uid>@gerrit.opnf.org/gerrit/genesis>
-Check-out the Arno release:
-<cd genesis; git checkout arno>
-Goto the fuel directory and build the .iso
-<cd fuel/build; make all>
-
-Familiarize yourself with the Fuel 6.0.1 version by reading the following documents:
-- abc <http://wiki.openstack.org/abc>
-- def <http://wiki.openstack.org/def>
-- ghi <http://wiki.openstack.org/ghi>
-
-Secondly, a number of deployment specific parameters must be collected, those are:
-
-1. Provider sub-net and gateway information
-
-2. Provider VLAN information
-
-3. Provider DNS addresses
-
-4. Provider NTP addresses
-
-This information will be needed for the configuration procedures provided in this document.
-
-
-Hardware requirements
-=====================
-<PROVIDE A LIST OF MINIMUM HARDWARE REQUIREMENTS NEEDED FOR THE INSTALL>
-
-<EXAMPLE>:
-
-Following minimum hardware requirements must be met for installation of Fuel@OPNFV:
-
-+--------------------+----------------------------------------------------+
-| **HW Aspect** | **Requirement** |
-| | |
-+--------------------+----------------------------------------------------+
-| **# of servers** | Minimum 5 (3 for non redundant deployment) |
-| | 1 Fuel deployment master (may be virtualized) |
-| | 3(1) Controllers |
-| | 1 Compute |
-+--------------------+----------------------------------------------------+
-| **CPU** | Minimum 1 socket x86_AMD64 Ivy bridge 1.6 GHz |
-| | |
-+--------------------+----------------------------------------------------+
-| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
-| | |
-+--------------------+----------------------------------------------------+
-| **Disk** | Minimum 256GB 10kRPM spinning disks |
-| | |
-+--------------------+----------------------------------------------------+
-| **NICs** | 2(1)x10GE Niantec for Private/Public (Redundant) |
-| | |
-| | 2(1)x10GE Niantec for SAN (Redundant) |
-| | |
-| | 2(1)x1GE for admin (PXE) and control (RabitMQ,etc) |
-| | |
-+--------------------+----------------------------------------------------+
-
-
-Top of the rack (TOR) Configuration requirements
-================================================
-<DESCRIBE NEEDED NETWORK TOPOLOGY SETUP IN THE TORs>
-
-<EXAMPLE>:
-
-The switching infrastructure provides connectivity for the OPNFV infra-structure operations as well as
-for the tenant networks (East/West) and provider connectivity (North/South bound connectivity).
-The switching connectivity can (but does not need to) be fully redundant,
-in case it and comprises a redundant 10GE switch pair for "Traffic/Payload/SAN" purposes as well as
-a 1GE switch pair for "infrastructure control-, management and administration"
-
-The switches are **not** automatically configured from the OPNFV reference platform.
-All the networks involved in the OPNFV infra-structure as well as the provider networks
-and the private tenant VLANs needs to be manually configured.
-
-This following sections guides through required black-box switch configurations.
-
-VLAN considerations and blue-print
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-IP Address plan considerations and blue-print
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-
-OPNFV Software installation and deployment
-==========================================
-<DESCRIBE THE FULL PROCEDURES FOR THE INSTALLATION OF THE OPNFV COMPONENT INSTALLATION AND DEPLOYMENT>
-
-<EXAMPLE>:
-
-This section describes the installation of the Fuel@OPNFV installation server (Fuel master)
-as well as the deployment of the full OPNFV reference platform stack across a server cluster.
-Etc.
-
-Install Fuel master
-^^^^^^^^^^^^^^^^^^^^^
-
-Create an OPNV (Fuel Environment)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Configure the OPNFV environment
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Deploy the OPNFV environment
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-
-Installation health-check
-=========================
-<DESCRIBE ANY MEANS TO DO VERIFY THE INTEGRITY AND HEALTHYNESS OF THE INSTALL>
-
-<EXAMPLE>:
-
-Now that the OPNFV environment has been created, and before the post installation configurations is started,
-perform a system health check from the Fuel GUI:
-
-- Select the "Health check" TAB.
-- Select all test-cases
-- And click "Run tests"
-
-All test cases except the following should pass:
-
-Post installation and deployment actions
-------------------------------------------
-<DESCRIBE ANY POST INSTALLATION ACTIONS/CONFIGURATIONS NEEDED>
-
-<EXAMPLE>:
-After the OPNFV deployment is completed, the following manual changes needs to be performed in order
-for the system to work according OPNFV standards.
-
-**Change host OS password:**
-Change the Host OS password by......
-
-
-References
-==========
-<PROVIDE NEEDED/USEFUL REFERENCES>
-
-<EXAMPLES>:
-
-OPNFV
-^^^^^^^^^^
-
-OpenStack
-^^^^^^^^^^^
-
-OpenDaylight
-^^^^^^^^^^^^^^^
diff --git a/docs/release/installation/index.rst b/docs/release/installation/index.rst
deleted file mode 100644
index 0120e92..0000000
--- a/docs/release/installation/index.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-.. _auto-configguide:
-
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-
-=====================================================
-OPNFV Auto (ONAP-Automated OPNFV) Configuration Guide
-=====================================================
-
-.. toctree::
- :maxdepth: 1
-
- UC01-installation.instruction.rst
- UC02-installation.instruction.rst
- UC03-installation.instruction.rst
diff --git a/docs/release/release-notes/Auto-release-notes.rst b/docs/release/release-notes/Auto-release-notes.rst
index 84665cd..eab68cc 100644
--- a/docs/release/release-notes/Auto-release-notes.rst
+++ b/docs/release/release-notes/Auto-release-notes.rst
@@ -4,15 +4,14 @@
.. (c) Open Platform for NFV Project, Inc. and its contributors
-==================
Auto Release Notes
==================
This document provides the release notes for Fraser release of Auto.
-Important notes
-===============
+Important notes for this release
+================================
Initial release (project inception: July 2017).
@@ -26,7 +25,7 @@ In particular, OPNFV has yet to integrate higher-level automation features for V
Auto ("ONAP-Automated OPNFV") will focus on ONAP component integration and verification with OPNFV reference platforms/scenarios, through primarily a post-install process in order to avoid impact to OPNFV installer projects. As much as possible, this will use a generic installation/integration process (not specific to any OPNFV installer's technology).
-* `ONAP <https://www.onap.org/`_ (a Linux Foundation Project) is an open source software platform that delivers robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of Software-Defined Networks (SDNs).
+* `ONAP <https://www.onap.org/>`_ (a Linux Foundation Project) is an open source software platform that delivers robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of Software-Defined Networks (SDNs).
While all of ONAP is in scope, as it proceeds, the project will focus on specific aspects of this integration and verification in each release. Some example topics and work items include:
@@ -58,8 +57,8 @@ Auto’s goals include the standup and tests for integrated ONAP-Cloud platforms
Auto currently defines three use cases: Edge Cloud, Resiliency Improvements, and Enterprise vCPE. These use cases aim to show:
* increased autonomy of Edge Cloud management (automation, catalog-based deployment)
-* increased resilience (i.e. fast VNF recovery in case of failure or problem, thanks to closed-loop control)
-* enterprise-grade performance of vCPEs (certification during onboarding, then real-time performance assurance with SLAs and HA).
+* increased resilience (i.e. fast VNF recovery in case of failure or problem, thanks to closed-loop control), including end-to-end composite services of which a Cloud Manager may not be aware
+* enterprise-grade performance of vCPEs (certification during onboarding, then real-time performance assurance with SLAs and HA as well as scaling).
The use cases define test cases, which initially will be independent, but which might eventually be integrated to FuncTest.
@@ -67,18 +66,22 @@ Additional use cases can be added in the future, such as vIMS (example: project
Target architectures include x86 and Arm.
-An ONAP instance (without DCAE) has been installed over Kubernetes on bare metal on an x86 pod of 6 servers at UNH IOL.
-Onboarding of 2 VNFs is in progress: a vCPE and a vFW.
+An ONAP instance (without DCAE) has been installed over Kubernetes on bare metal on an x86 pod of 6 servers at UNH IOL. A transition is in progress to leverage OPNFV LaaS (Lab-as-a-Service) pods (`Pharos <https://labs.opnfv.org/>`_).
+ONAP-based onboarding and deployment of VNFs is in progress (ONAP pre-loading of VNFs must still be done outside of ONAP: for VM-based VNFs, OpenStack stacks need to be prepared (using Heat templates), then an instance snapshot is made, which serves as the binary image of the VNF); an illustrative sketch follows below.
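+
+As an illustration, this manual pre-loading corresponds to OpenStack CLI steps such as the
+following sketch (template and resource names are placeholders):
+
+.. code-block:: console
+
+   openstack stack create -t vnf-template.yaml vnf-stack        # instantiate the Heat stack for the VNF
+   openstack server image create --name vnf-image vnf-instance  # snapshot the VM; the image serves as the VNF binary
+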
Integration with Arm servers has started (exploring binary compatibility):
* Openstack is currently installed on a 6-server pod of Arm servers
-* a Kubernetes cluster is installed there as well, for another instance of ONAP on Arm servers
-* An additional set of 14 Arm servers is in the process of being deployed at UNH, for increased capacity
-* LaaS (Lab as a Service) resources are also used (hpe16, hpe17, hpe19)
+* A set of 14 additional Arm servers was deployed at UNH, for increased capacity
+* Arm-compatible Docker images are in the process of being developed
Test case implementation for the three use cases has started.
+Finally, the following figure illustrates Auto in terms of project activities:
+
+.. image:: auto-project-activities.png
+
+
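+As an illustration of this pre-loading step, a minimal sketch using the OpenStack SDK (cloud entry, template file and resource names are placeholders; method signatures should be verified against the installed SDK version):
+
+.. code-block:: python
+
+    import openstack
+    import yaml
+
+    conn = openstack.connect(cloud='hpe16openstackFraser', region_name='RegionOne')
+
+    # 1) prepare an OpenStack stack for the VNF from a Heat template (placeholder file)
+    with open('vfw_heat_template.yaml') as f:
+        template = yaml.safe_load(f)
+    stack = conn.orchestration.create_stack(name='vfw-preload', template=template)
+
+    # 2) once a VNF instance is booted and configured, snapshot it;
+    #    the resulting image serves as the binary image of the VNF
+    server = conn.compute.find_server('vfw-instance')
+    image = conn.compute.create_server_image(server, name='vfw-vnf-image')
+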
Release Data
============
@@ -115,6 +118,7 @@ Document version changes
Reason for version
^^^^^^^^^^^^^^^^^^
+
Feature additions
~~~~~~~~~~~~~~~~~
@@ -127,21 +131,37 @@ Initial release, with use case descriptions, release plan, and in-progress test
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| AUTO-1 | Define Auto-UC-01 Service Provider's |
+| AUTO-1, UC1 definition | Define Auto-UC-01 Service Provider's |
| | Management of Edge Cloud |
+--------------------------------------+--------------------------------------+
-| AUTO-2 | Define Auto-UC-02 Resilience |
+| AUTO-2, UC2 definition | Define Auto-UC-02 Resilience |
| | Improvements through ONAP |
+--------------------------------------+--------------------------------------+
-| AUTO-7 | Define Auto-UC-03 Enterprise vCPE |
+| AUTO-7, UC3 definition | Define Auto-UC-03 Enterprise vCPE |
| | |
+--------------------------------------+--------------------------------------+
-| AUTO-4 | Develop test cases for Auto-UC-02 |
+| AUTO-4, UC2 test case definition | Develop test cases for Auto-UC-02 |
| | Resilience Improvements through ONAP |
+--------------------------------------+--------------------------------------+
-| AUTO-8 | Develop test cases for Auto-UC-03 |
+| AUTO-8, UC3 test case definition | Develop test cases for Auto-UC-03 |
| | Enterprise vCPE |
+--------------------------------------+--------------------------------------+
+| (UC1 test case definition is done, | |
+| but no associated JIRA ticket) | |
++--------------------------------------+--------------------------------------+
+| AUTO-5, install ONAP | Getting ONAP running onto Pharos |
+| | deployment (without DCAE) |
++--------------------------------------+--------------------------------------+
+| AUTO-31, UC1 test case progress | auto-edge-pif-001 Basic OpenStack |
+| | environment check |
++--------------------------------------+--------------------------------------+
+| AUTO-13, UC2 test case progress | Develop test script for vif-001: |
+| | Data Management |
++--------------------------------------+--------------------------------------+
+| AUTO-20, UC3 test case progress | Onboarding of VNFs via SDC GUI |
+| | |
++--------------------------------------+--------------------------------------+
+
Bug corrections
@@ -160,6 +180,7 @@ Bug corrections
| | |
+--------------------------------------+--------------------------------------+
+
Deliverables
============
@@ -174,8 +195,10 @@ Documentation deliverables
Initial versions of:
-* User guide `OPNFV User and Configuration Guide <http://docs.opnfv.org/en/latest/release/userguide.introduction.html>`_
* Release notes (this document)
+* User guide `OPNFV User and Configuration Guide <http://docs.opnfv.org/en/latest/release/userguide.introduction.html>`_
+* Configuration Guide (same landing page as User Guide)
+
@@ -240,6 +263,23 @@ References
For more information on the OPNFV Fraser release, please see:
http://opnfv.org/fraser
-Auto Wiki:
-https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+Auto Wiki pages:
+
+* `Auto wiki main page <https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095>`_
+
+
+OPNFV documentation on Auto:
+
+* `Auto release notes <http://docs.opnfv.org/en/latest/release/release-notes.html>`_
+* `Auto use case user guides <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/userguide/index.html#auto-userguide>`_
+* `Auto configuration guide <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/configguide/index.html#auto-configguide>`_
+
+
+Git & Gerrit Auto repositories:
+
+* `Auto Git repository <https://git.opnfv.org/auto/tree/>`_
+* `Gerrit for Auto project <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_
+
+
diff --git a/docs/release/release-notes/auto-project-activities.png b/docs/release/release-notes/auto-project-activities.png
new file mode 100644
index 0000000..a946372
--- /dev/null
+++ b/docs/release/release-notes/auto-project-activities.png
Binary files differ
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index 7a70167..264f21c 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -10,6 +10,6 @@ OPNFV Auto (ONAP-Automated OPNFV) Release Notes
.. toctree::
:numbered:
- :maxdepth: 2
+ :maxdepth: 3
Auto-release-notes.rst
diff --git a/docs/release/userguide/UC01-feature.userguide.rst b/docs/release/userguide/UC01-feature.userguide.rst
index 5cf38e1..ea02bad 100644
--- a/docs/release/userguide/UC01-feature.userguide.rst
+++ b/docs/release/userguide/UC01-feature.userguide.rst
@@ -34,7 +34,7 @@ Preconditions:
Main Success Scenarios:
-* lifecycle management - stop, stop, scale (dependent upon telemetry)
+* lifecycle management - start, stop, scale (dependent upon telemetry)
* recovering from faults (detect, determine appropriate response, act); i.e. exercise closed-loop policy engine in ONAP
@@ -47,7 +47,7 @@ Details on the test cases corresponding to this use case:
* Environment check
- * Basic environment check: Create test script to check basic VIM (OpenStack), ONAP, and VNF are up and running
+ * Basic environment check: Create test script to check basic VIM (OpenStack), ONAP, and VNF(s) are up and running
* VNF lifecycle management
@@ -55,7 +55,7 @@ Details on the test cases corresponding to this use case:
* Tacker Monitoring Driver (VNFMonitorPing):
- * Write Tacker Monitor driver to handle monitor_call and based on return state value create custom events
+ * Write Tacker Monitor driver to handle monitor_call and, based on return state value, create custom events
* If Ping to VNF fails, trigger below events
* Event 1 : Collect failure logs from VNF
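+
+As an illustration of such a monitoring driver, a minimal sketch (class and method names follow Tacker's monitor-driver pattern as an assumption; this is not the actual Auto/Tacker code):
+
+.. code-block:: python
+
+    import subprocess
+
+    class VNFMonitorPing(object):
+        """Ping-based monitor driver (sketch)."""
+
+        def get_type(self):
+            return 'ping'
+
+        def monitor_call(self, vnf, kwargs):
+            # ping the VNF management IP; the returned state value drives custom events
+            mgmt_ip = kwargs['mgmt_ip']              # assumed to be supplied by the VNFM
+            try:
+                subprocess.check_call(['ping', '-c', '3', mgmt_ip])
+                return True                          # reachable: no event
+            except subprocess.CalledProcessError:
+                return 'failure'                     # unreachable: trigger the events above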
diff --git a/docs/release/userguide/UC02-feature.userguide.rst b/docs/release/userguide/UC02-feature.userguide.rst
index 0ecb7de..3ed5781 100644
--- a/docs/release/userguide/UC02-feature.userguide.rst
+++ b/docs/release/userguide/UC02-feature.userguide.rst
@@ -8,7 +8,8 @@
Auto User Guide: Use Case 2 Resiliency Improvements Through ONAP
================================================================
-This document provides the user guide for Fraser release of Auto, specifically for Use Case 2: Resiliency Improvements Through ONAP.
+This document provides the user guide for Fraser release of Auto,
+specifically for Use Case 2: Resiliency Improvements Through ONAP.
Description
@@ -22,6 +23,8 @@ This use case illustrates VNF failure recovery time reduction with ONAP, thanks
The benefit for NFV edge service providers is to assess what degree of added VIM+NFVI platform resilience for VNFs is obtained by leveraging ONAP closed-loop control, vs. VIM+NFVI self-managed resilience (which may not be aware of the VNF or the corresponding end-to-end Service, but only of underlying resources such as VMs and servers).
+Also, a problem or challenge is not necessarily a failure (which could also be recovered by other layers): it could be an issue leading to suboptimal performance, without any failure. A VNF management layer as provided by ONAP may detect such non-failure problems, and provide a recovery solution which no other layer could provide in a given deployment.
+
Preconditions:
@@ -36,7 +39,7 @@ Different types of problems can be simulated, hence the identification of multip
.. image:: auto-UC02-testcases.jpg
-Description of simulated problems/challenges:
+Description of simulated problems/challenges, leading to various test cases:
* Physical Infra Failure
@@ -60,7 +63,6 @@ Description of simulated problems/challenges:
-
Test execution high-level description
=====================================
@@ -76,7 +78,7 @@ The second MSC illustrates the pattern of all test cases for the Resiliency Impr
* simulate the chosen problem (a.k.a. a "Challenge") for this test case, for example suspend a VM which may be used by a VNF
* start tracking the target VNF of this test case
* measure the ONAP-orchestrated VNF Recovery Time
-* then the test stops simulating the problem (for example: resume the VM that was suspended),
+* then the test stops simulating the problem (for example: resume the VM that was suspended); this common pattern is sketched below
In parallel, the MSC also shows the sequence of events happening in ONAP, thanks to its configuration to provide Service Assurance for the VNF.
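+
+This common pattern can be condensed into the following sketch (the function arguments are placeholders; the actual implementation lives under ``lib/auto/testcase/resiliency``):
+
+.. code-block:: python
+
+    from datetime import datetime
+    import time
+
+    def run_resiliency_test(start_challenge, vnf_is_recovered, stop_challenge, poll_s=5):
+        start_challenge()                       # e.g., suspend a VM used by the VNF
+        t_start = datetime.now()
+        while not vnf_is_recovered():           # e.g., query ONAP, or ping the VNF
+            time.sleep(poll_s)
+        recovery_time = (datetime.now() - t_start).total_seconds()
+        stop_challenge()                        # e.g., resume the suspended VM
+        return recovery_time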
@@ -86,21 +88,21 @@ In parallel, the MSC also shows the sequence of events happening in ONAP, thanks
Test design: data model, implementation modules
===============================================
-The high-level design of classes identifies several entities:
+The high-level design of classes identifies several entities, described as follows:
-* Test Case: as identified above, each is a special case of the overall use case (e.g., categorized by challenge type)
-* Test Definition: gathers all the information necessary to run a certain test case
-* Metric Definition: describes a certain metric that may be measured, in addition to Recovery Time
-* Challenge Definition: describe the challenge (problem, failure, stress, ...) simulated by the test case
-* Recipient: entity that can receive commands and send responses, and that is queried by the Test Definition or Challenge Definition (a recipient would be typically a management service, with interfaces (CLI or API) for clients to query)
-* Resources: with 3 types (VNF, cloud virtual resource such as a VM, physical resource such as a server)
+* ``Test Case``: as identified above, each is a special case of the overall use case (e.g., categorized by challenge type)
+* ``Test Definition``: gathers all the information necessary to run a certain test case
+* ``Metric Definition``: describes a certain metric that may be measured for a Test Case, in addition to Recovery Time
+* ``Challenge Definition``: describes the challenge (problem, failure, stress, ...) simulated by the test case
+* ``Recipient``: an entity that can receive commands and send responses, and that is queried by the Test Definition or Challenge Definition (a recipient would typically be a management service, with interfaces (CLI or API) for clients to query)
+* ``Resources``: with 3 types (VNF, cloud virtual resource such as a VM, physical resource such as a server)
Three of these entities have execution-time corresponding classes:
-* Test Execution, which captures all the relevant data of the execution of a Test Definition
-* Challenge Execution, which captures all the relevant data of the execution of a Challenge Definition
-* Metric Value, which captures the a quantitative measurement of a Metric Definition (with a timestamp)
+* ``Test Execution``, which captures all the relevant data of the execution of a Test Definition
+* ``Challenge Execution``, which captures all the relevant data of the execution of a Challenge Definition
+* ``Metric Value``, which captures the quantitative measurement of a Metric Definition (with a timestamp)
.. image:: auto-UC02-data1.jpg
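+
+For illustration, a condensed sketch of how some of these entities relate (attribute names are illustrative, not the exact fields of the Auto classes):
+
+.. code-block:: python
+
+    class TestDefinition(object):                     # one per Test Case
+        def __init__(self, ID, name, challenge_def_ID, metric_def_IDs, VNF_IDs):
+            self.ID = ID
+            self.name = name
+            self.challenge_def_ID = challenge_def_ID  # references a Challenge Definition
+            self.metric_def_IDs = metric_def_IDs      # e.g., Recovery Time
+            self.VNF_IDs = VNF_IDs                    # references Resources of type VNF
+
+    class TestExecution(object):                      # many per Test Definition
+        def __init__(self, ID, test_def_ID, challenge_exec_ID, user_ID):
+            self.ID = ID
+            self.test_def_ID = test_def_ID
+            self.challenge_exec_ID = challenge_exec_ID
+            self.user_ID = user_ID
+            self.recovery_time = None                 # zero or one Metric Value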
@@ -122,13 +124,28 @@ The module design is straightforward: functions and classes for managing data, f
.. image:: auto-UC02-module1.jpg
-This last diagram shows the test user menu functions:
+This last diagram shows the test user menu functions, when used interactively:
.. image:: auto-UC02-module2.jpg
-In future releases of Auto, testing environments such as FuncTest and Yardstick might be leveraged.
+In future releases of Auto, testing environments such as Robot, FuncTest and Yardstick might be leveraged. Use Case code will then be invoked by API, not by a CLI interaction.
Also, anonymized test results could be collected from users willing to share them, and aggregates could be
maintained as benchmarks.
+As further illustration, the next figure shows cardinalities of class instances: one Test Definition per Test Case, multiple Test Executions per Test Definition, zero or one Recovery Time Metric Value per Test Execution (zero if the test failed for any reason, including if ONAP failed to recover the challenge), etc.
+
+.. image:: auto-UC02-cardinalities.png
+
+
+In this particular implementation, both Test Definition and Challenge Definition classes have a generic execution method (e.g., ``run_test_code()`` for Test Definition) which can invoke a particular script, by way of an ID (which can be configured, and serves as a script selector for each Test Definition instance); a condensed sketch of this mechanism appears at the end of this section. The overall test execution logic between classes is shown in the next figure.
+
+.. image:: auto-UC02-logic.png
+
+The execution of a test case starts with invoking the generic method from Test Definition, which then creates Execution instances, invokes Challenge Definition methods, performs the Recovery time calculation, performs script-specific actions, and writes results to the CSV files.
+
+Finally, the following diagram shows a mapping between these class instances and the initial test case design. It corresponds to the test case which simulates a VM failure, and shows how the OpenStack SDK API is invoked (with a connection object) by the Challenge Definition methods, to suspend and resume a VM.
+
+.. image:: auto-UC02-TC-mapping.png
+
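+A condensed sketch of the generic-method/script-selector mechanism mentioned above (simplified from the actual ``AutoResilMgTestDef.py`` code):
+
+.. code-block:: python
+
+    class TestDefinition(object):
+        def __init__(self, ID, test_code_ID):
+            self.ID = ID
+            self.test_code_ID = test_code_ID      # configured script selector
+            self.test_code_list = [self.test_code001, self.test_code002]
+
+        def run_test_code(self):
+            # generic execution method: common logic here, then dispatch by ID
+            self.test_code_list[self.test_code_ID - 1]()   # lists are 0-indexed
+
+        def test_code001(self):
+            print('test-case-specific monitoring, script #1')
+
+        def test_code002(self):
+            print('test-case-specific monitoring, script #2')
+
+    TestDefinition(5, 2).run_test_code()          # runs test_code002
+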
diff --git a/docs/release/userguide/UC03-feature.userguide.rst b/docs/release/userguide/UC03-feature.userguide.rst
index 5f28158..cf96981 100644
--- a/docs/release/userguide/UC03-feature.userguide.rst
+++ b/docs/release/userguide/UC03-feature.userguide.rst
@@ -15,16 +15,25 @@ specifically for Use Case 3: Enterprise vCPE.
Description
===========
-This Use Case shows how ONAP can help ensuring that virtual CPEs (including vFW: virtual firewalls) in Edge Cloud are enterprise-grade.
+This Use Case shows how ONAP can help ensure that virtual CPEs (including vFW: virtual firewalls) in Edge Cloud are enterprise-grade.
+Other vCPE examples: vAAA, vDHCP, vDNS, vGW, vBNG, vRouter, ...
-ONAP operations include a verification process for VNF onboarding (i.e. inclusion in the ONAP catalog), with multiple Roles (designer, tester, governor, operator), responsible for approving proposed VNFs (as VSPs (Vendor Software Products), and eventually as end-to-end Services).
+ONAP operations include a verification process for VNF onboarding (i.e., inclusion in the ONAP catalog), with multiple Roles (Designer, Tester, Governor, Operator), responsible for approving proposed VNFs (as VSPs (Vendor Software Products), and eventually as end-to-end Services).
-This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are only chosen from such an approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet enterprise-grade requirements. ONAP provides a NBI in addition to a standard portal, thus enabling a programmatic deployment of VNFs, still conforming to ONAP processes.
+This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are chosen only from such an approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet enterprise-grade requirements. ONAP provides an NBI (currently HTTP-based) in addition to a standard GUI portal, thus enabling a programmatic deployment of VNFs, still conforming to ONAP processes.
-Moreover, ONAP also comprises real-time monitoring (by the DCAE component), which monitors performance for SLAs, can adjust allocated resources accordingly (elastic adjustment at VNF level), and can ensure High Availability.
+Moreover, ONAP also comprises real-time monitoring (by the DCAE component), which can perform the following functions:
+
+* monitor VNF performance for SLAs
+* adjust allocated resources accordingly (elastic adjustment at VNF level: scaling out and in, possibly also scaling up and down)
+* ensure High Availability (restoration of failed or underperforming services)
DCAE executes directives coming from policies described in the Policy Framework, and closed-loop controls described in the CLAMP component.
+ONAP can perform the provisioning side of a BSS Order Management application handling vCPE orders.
+
+Additional processing can be added to ONAP (internally as configured policies and closed-loop controls, or externally as separate systems): Path Computation Element and Load Balancing, and even telemetry-based Network Artificial Intelligence.
+
Finally, this automated approach also reduces costs, since repetitive actions are designed once and executed multiple times, as vCPEs are instantiated and decommissioned (frequent events, given the variability of business activity, and a Small Business market similar to the Residential market: many contract updates resulting in many vCPE changes).
NFV edge service providers need to provide site2site, site2dc (Data Center) and site2internet services to tenants both efficiently and safely, by deploying such qualified enterprise-grade vCPE.
@@ -42,34 +51,50 @@ Main Success Scenarios:
* VNF spin-up
- * vCPE spin-up: MSO calls the VNFM to spin up a vCPE instance from the catalog and then updates the active VNF list
* vFW spin-up: MSO calls the VNFM to spin up a vFW instance from the catalog and then updates the active VNF list
+    * other vCPE spin-up: MSO calls the VNFM to spin up a vCPE instance from the catalog and then updates the active VNF list
* site2site
* L3VPN service subscribing: MSO calls the SDNC to create VXLAN tunnels to carry L2 traffic between client's ThinCPE and SP's vCPE, and enables vCPE to route between different sites.
* L3VPN service unsubscribing: MSO calls the SDNC to destroy tunnels and routes, thus disable traffic between different sites.
+* site2dc (site to Data Center) by VPN
+* site2internet
+* scaling control (start with scaling out/in)
See `ONAP description of vCPE use case <https://wiki.onap.org/display/DW/Use+Case+proposal%3A+Enterprise+vCPE>`_ for more details, including MSCs.
Details on the test cases corresponding to this use case:
-* VNF Management
+* vCPE VNF deployment
+
+ * Spin up a vFW instance by calling NBI of the orchestrator.
+    * Following the vFW example and pattern, spin up other vCPE instances.
+
+* vCPE VNF networking
+
+ * Subscribe/Unsubscribe to a VPN service: configure tenant/subscriber for vCPE, configure VPN service
+ * Subscribe/Unsubscribe to an Internet Access service: configure tenant/subscriber for vCPE, configure Internet Access service
+
+* vCPE VNF Scaling
+
+    * ONAP-based VNF scale-out and scale-in (using measurements arriving in DCAE, policies/CLAMP, or an external system performing the LB function)
+ * later, possibly also scale-up and scale-down
+
+
+
+The following diagram shows these test cases:
+
+.. image:: auto-UC03-TestCases.png
- * Spin up a vCPE instance: Spin up a vCPE instance, by calling NBI of the orchestrator.
- * Spin up a vFW instance: Spin up a vFW instance, by calling NBI of the orchestrator.
-* VPN as a Service
+Illustration of test cases mapped to the architecture, with possible external systems (BSS for Order Management, PCE+LB, Network AI):
- * Subscribe to a VPN service: Subscribe to a VPN service, by calling NBI of the orchestrator.
- * Unsubscribe to a VPN service: Unsubscribe to a VPN service, by calling NBI of the orchestrator.
+.. image:: auto-UC03-TC-archit.png
-* Internet as a Service
- * Subscribe to an Internet service: Subscribe to an Internet service, by calling NBI of the orchestrator.
- * Unsubscribe to an Internet service: Unsubscribe to an Internet service, by calling NBI of the orchestrator.
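+As an illustration of "calling NBI of the orchestrator", a hypothetical sketch (the endpoint, API version and payload shape are assumptions for illustration, not the documented Auto test code):
+
+.. code-block:: python
+
+    import requests
+
+    NBI = 'http://onap-nbi.example.com:8080/nbi/api/v1'   # hypothetical base URL
+
+    order = {
+        'description': 'vFW spin-up for Auto UC3',
+        'orderItem': [{
+            'action': 'add',
+            'service': {'name': 'vFW',
+                        'serviceSpecification': {'id': 'vfw-spec-id'}},  # placeholder
+        }],
+    }
+    resp = requests.post(NBI + '/serviceOrder', json=order)
+    resp.raise_for_status()
+    print('service order id:', resp.json().get('id'))
+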
Test execution high-level description
diff --git a/docs/release/userguide/auto-UC02-TC-mapping.png b/docs/release/userguide/auto-UC02-TC-mapping.png
new file mode 100644
index 0000000..c2dd0db
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-TC-mapping.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-cardinalities.png b/docs/release/userguide/auto-UC02-cardinalities.png
new file mode 100644
index 0000000..10dd3b0
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-cardinalities.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-data1.jpg b/docs/release/userguide/auto-UC02-data1.jpg
index 02a60ba..62526c5 100644
--- a/docs/release/userguide/auto-UC02-data1.jpg
+++ b/docs/release/userguide/auto-UC02-data1.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-data2.jpg b/docs/release/userguide/auto-UC02-data2.jpg
index 7096c96..df73a94 100644
--- a/docs/release/userguide/auto-UC02-data2.jpg
+++ b/docs/release/userguide/auto-UC02-data2.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-data3.jpg b/docs/release/userguide/auto-UC02-data3.jpg
index 8e8921d..3f84a20 100644
--- a/docs/release/userguide/auto-UC02-data3.jpg
+++ b/docs/release/userguide/auto-UC02-data3.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-logic.png b/docs/release/userguide/auto-UC02-logic.png
new file mode 100644
index 0000000..90b41dd
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-logic.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC03-TC-archit.png b/docs/release/userguide/auto-UC03-TC-archit.png
new file mode 100644
index 0000000..95d641b
--- /dev/null
+++ b/docs/release/userguide/auto-UC03-TC-archit.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC03-TestCases.png b/docs/release/userguide/auto-UC03-TestCases.png
new file mode 100644
index 0000000..bb84a57
--- /dev/null
+++ b/docs/release/userguide/auto-UC03-TestCases.png
Binary files differ
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
index 7cfbe94..dd308dc 100644
--- a/docs/release/userguide/index.rst
+++ b/docs/release/userguide/index.rst
@@ -16,7 +16,7 @@ OPNFV Auto (ONAP-Automated OPNFV) User Guide
.. toctree::
:numbered:
- :maxdepth: 2
+ :maxdepth: 3
UC01-feature.userguide.rst
UC02-feature.userguide.rst
diff --git a/lib/auto/testcase/resiliency/AutoResilItfCloud.py b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
index 69c5327..302a662 100644
--- a/lib/auto/testcase/resiliency/AutoResilItfCloud.py
+++ b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
@@ -33,14 +33,15 @@
######################################################################
# import statements
import AutoResilGlobal
+import time
# for method 1 and 2
-#import openstack
+import openstack
#for method 3
-from openstack import connection
+#from openstack import connection
-def os_list_servers(conn):
+def openstack_list_servers(conn):
"""List OpenStack servers."""
# see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/compute.html
if conn != None:
@@ -49,14 +50,20 @@ def os_list_servers(conn):
try:
i=1
for server in conn.compute.servers():
- print('Server',str(i),'\n',server,'n')
+ print('Server',str(i))
+ print(' Name:',server.name)
+ print(' ID:',server.id)
+ print(' key:',server.key_name)
+ print(' status:',server.status)
+ print(' AZ:',server.availability_zone)
+ print('Details:\n',server)
i+=1
except Exception as e:
print("Exception:",type(e), e)
print("No Servers\n")
-def os_list_networks(conn):
+def openstack_list_networks(conn):
"""List OpenStack networks."""
# see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/network.html
if conn != None:
@@ -65,14 +72,14 @@ def os_list_networks(conn):
try:
i=1
for network in conn.network.networks():
- print('Network',str(i),'\n',network,'n')
+ print('Network',str(i),'\n',network,'\n')
i+=1
except Exception as e:
print("Exception:",type(e), e)
print("No Networks\n")
-def os_list_volumes(conn):
+def openstack_list_volumes(conn):
"""List OpenStack volumes."""
# see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/block_storage.html
# note: The block_storage member will only be added if the service is detected.
@@ -82,14 +89,20 @@ def os_list_volumes(conn):
try:
i=1
for volume in conn.block_storage.volumes():
- print('Volume',str(i),'\n',volume,'n')
+ print('Volume',str(i))
+ print(' Name:',volume.name)
+ print(' ID:',volume.id)
+ print(' size:',volume.size)
+ print(' status:',volume.status)
+ print(' AZ:',volume.availability_zone)
+ print('Details:\n',volume)
i+=1
except Exception as e:
print("Exception:",type(e), e)
print("No Volumes\n")
-
-def os_list_users(conn):
+
+def openstack_list_users(conn):
"""List OpenStack users."""
# see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
if conn != None:
@@ -98,13 +111,13 @@ def os_list_users(conn):
try:
i=1
for user in conn.identity.users():
- print('User',str(i),'\n',user,'n')
+ print('User',str(i),'\n',user,'\n')
i+=1
except Exception as e:
print("Exception:",type(e), e)
print("No Users\n")
-
-def os_list_projects(conn):
+
+def openstack_list_projects(conn):
"""List OpenStack projects."""
# see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
if conn != None:
@@ -113,14 +126,14 @@ def os_list_projects(conn):
try:
i=1
for project in conn.identity.projects():
- print('Project',str(i),'\n',project,'n')
+ print('Project',str(i),'\n',project,'\n')
i+=1
except Exception as e:
print("Exception:",type(e), e)
print("No Projects\n")
-
-def os_list_domains(conn):
+
+def openstack_list_domains(conn):
"""List OpenStack domains."""
# see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
if conn != None:
@@ -129,7 +142,7 @@ def os_list_domains(conn):
try:
i=1
for domain in conn.identity.domains():
- print('Domain',str(i),'\n',domain,'n')
+ print('Domain',str(i),'\n',domain,'\n')
i+=1
except Exception as e:
print("Exception:",type(e), e)
@@ -138,14 +151,17 @@ def os_list_domains(conn):
-
-
+
+
def gdtest_openstack():
- # Method 1: assume there is a clouds.yaml file in PATH, starting path search with local directory
+
+    # Method 1 (preferred): assume there is a clouds.yaml file in PATH, starting path search with local directory
#conn = openstack.connect(cloud='armopenstack', region_name='RegionOne')
- #conn = openstack.connect(cloud='hpe16openstack', region_name='RegionOne')
- # getting error: AttributeError: module 'openstack' has no attribute 'connect'
+ #conn = openstack.connect(cloud='hpe16openstackEuphrates', region_name='RegionOne')
+ conn = openstack.connect(cloud='hpe16openstackFraser', region_name='RegionOne')
+ # if getting error: AttributeError: module 'openstack' has no attribute 'connect', check that openstack is installed for this python version
+
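+    # quick sanity check (sketch): authorize() raises if authentication fails,
+    # and returns a token string on success
+    print('token:', conn.authorize())
+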
# Method 2: pass arguments directly, all as strings
# see details at https://docs.openstack.org/python-openstacksdk/latest/user/connection.html
@@ -163,19 +179,20 @@ def gdtest_openstack():
# password='opnfv_secret',
# region_name='RegionOne',
# )
- # getting error: AttributeError: module 'openstack' has no attribute 'connect'
+ # if getting error: AttributeError: module 'openstack' has no attribute 'connect', check that openstack is installed for this python version
+
# Method 3: create Connection object directly
- auth_args = {
- #'auth_url': 'https://10.10.50.103:5000/v2.0', # Arm
- #'auth_url': 'http://10.16.0.101:5000/v2.0', # hpe16, Euphrates
- 'auth_url': 'http://10.16.0.107:5000/v3', # hpe16, Fraser
- 'project_name': 'admin',
- 'username': 'admin',
- 'password': 'opnfv_secret',
- 'region_name': 'RegionOne',
- 'domain': 'Default'}
- conn = connection.Connection(**auth_args)
+ # auth_args = {
+ # #'auth_url': 'https://10.10.50.103:5000/v2.0', # Arm
+ # #'auth_url': 'http://10.16.0.101:5000/v2.0', # hpe16, Euphrates
+ # 'auth_url': 'http://10.16.0.107:5000/v3', # hpe16, Fraser
+ # 'project_name': 'admin',
+ # 'username': 'admin',
+ # 'password': 'opnfv_secret',
+ # 'region_name': 'RegionOne',
+ # 'domain': 'Default'}
+ # conn = connection.Connection(**auth_args)
#conn = connection.Connection(
#auth_url='http://10.16.0.107:5000/v3',
@@ -184,12 +201,65 @@ def gdtest_openstack():
#password='opnfv_secret')
- os_list_servers(conn)
- os_list_networks(conn)
- os_list_volumes(conn)
- os_list_users(conn)
- os_list_projects(conn)
- os_list_domains(conn)
+ openstack_list_servers(conn)
+ openstack_list_networks(conn)
+ openstack_list_volumes(conn)
+ openstack_list_users(conn)
+ openstack_list_projects(conn)
+ openstack_list_domains(conn)
+
+ # VM: hpe16-Auto-UC2-gdtest-compute1
+ gds_ID = '715c677a-7914-4ca8-8c6d-75bf29eeb940'
+ gds = conn.compute.get_server(gds_ID)
+ print('\ngds.name=',gds.name)
+ print('gds.status=',gds.status)
+ print('suspending...')
+ conn.compute.suspend_server(gds_ID) # NOT synchronous: returns before suspension action is completed
+ wait_seconds = 10
+ print(' waiting',wait_seconds,'seconds...')
+ time.sleep(wait_seconds)
+ gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
+ print('gds.status=',gds.status)
+ print('resuming...')
+ conn.compute.resume_server(gds_ID)
+ print(' waiting',wait_seconds,'seconds...')
+ time.sleep(wait_seconds)
+ gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
+ print('gds.status=',gds.status)
+
+
+
+ #VM: test3
+ gds_ID = 'd3ceffc3-5967-4f18-b8b5-b1b2bd7ab76d'
+ gds = conn.compute.get_server(gds_ID)
+ print('\ngds.name=',gds.name)
+ print('gds.status=',gds.status)
+ print('suspending...')
+ conn.compute.suspend_server(gds_ID) # NOT synchronous: returns before suspension action is completed
+ wait_seconds = 10
+ print(' waiting',wait_seconds,'seconds...')
+ time.sleep(wait_seconds)
+ gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
+ print('gds.status=',gds.status)
+ print('resuming...')
+ conn.compute.resume_server(gds_ID)
+ print(' waiting',wait_seconds,'seconds...')
+ time.sleep(wait_seconds)
+ gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
+ print('gds.status=',gds.status)
+
+ #Volume: hpe16-Auto-UC2-gdtest-volume1
+ gdv_ID = '5a6c1dbd-5097-4a9b-8f79-6f03cde18bf6'
+ gdv = conn.block_storage.get_volume(gdv_ID)
+ # no API for stopping/restarting a volume... only delete. ONAP would have to completely migrate a VNF depending on this volume
+ print('\ngdv.name=',gdv.name)
+ print('gdv.status=',gdv.status)
+ #gdv_recreate = gdv
+ #print('deleting...')
+ #conn.block_storage.delete_volume(gdv_ID)
+ #conn.block_storage.delete_volume(gdv)
+ #print('recreating...')
+ #gdv = conn.block_storage.create_volume(<attributes saved in gdv_recreate>)
# get_server(server): Get a single Server
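+# Possible refinement (sketch only) of the fixed 10-second waits used above:
+# poll for the target status, since suspend/resume return before completion.
+def wait_for_server_status(conn, server_id, target, poll_s=2, max_tries=30):
+    """Poll a server until it reaches the target status (sketch, not called yet)."""
+    for _ in range(max_tries):
+        server = conn.compute.get_server(server_id)  # refresh; data not maintained live
+        if server.status == target:
+            return server
+        time.sleep(poll_s)
+    raise RuntimeError('server %s did not reach %s' % (server_id, target))
+# usage sketch: conn.compute.suspend_server(gds_ID); wait_for_server_status(conn, gds_ID, 'SUSPENDED')
+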
@@ -211,7 +281,7 @@ def main():
gdtest_openstack()
- print("Ciao\n")
+ print("\nCiao\n")
if __name__ == "__main__":
main()
diff --git a/lib/auto/testcase/resiliency/AutoResilMain.py b/lib/auto/testcase/resiliency/AutoResilMain.py
index 2f67bdf..1d21f6a 100644
--- a/lib/auto/testcase/resiliency/AutoResilMain.py
+++ b/lib/auto/testcase/resiliency/AutoResilMain.py
@@ -164,7 +164,6 @@ def main():
print("Problem with test definition: empty")
sys.exit() # stop entire program, because test definition MUST be correct
else:
- # TODO run test: call selected test definition run_test_code() method
test_def = get_indexed_item_from_list(selected_test_def_ID, AutoResilGlobal.test_definition_list)
if test_def != None:
test_def.run_test_code()
diff --git a/lib/auto/testcase/resiliency/AutoResilMgTestDef.py b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
index 9667f93..7e0b50d 100644
--- a/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
+++ b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
@@ -320,10 +320,62 @@ class TestDefinition(AutoBaseObject):
def run_test_code(self):
- """Run currently selected test code."""
+ """Run currently selected test code. Common code runs here, specific code is invoked through test_code_list and test_code_ID."""
try:
+ # here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,
+ # specific monitoring of VNF, trigger stop code from challenge def
+
+ time1 = datetime.now() # get time as soon as execution starts
+
+ # create challenge execution instance
+            chall_exec_ID = 1 # ideally would be incremented, but a count of challenge executions would need to be maintained somewhere; or it could be random
+ chall_exec_name = 'challenge execution' # challenge def ID is already passed
+ chall_exec_challDefID = self.challenge_def_ID
+ chall_exec = ChallengeExecution(chall_exec_ID, chall_exec_name, chall_exec_challDefID)
+ chall_exec.log.append_to_list('challenge execution created')
+
+ # create test execution instance
+            test_exec_ID = 1 # ideally would be incremented, but a count of test executions would need to be maintained somewhere; or it could be random
+ test_exec_name = 'test execution' # test def ID is already passed
+ test_exec_testDefID = self.ID
+ test_exec_userID = '' # or get user name from getpass module: import getpass and test_exec_userID = getpass.getuser()
+ test_exec = TestExecution(test_exec_ID, test_exec_name, test_exec_testDefID, chall_exec_ID, test_exec_userID)
+ test_exec.log.append_to_list('test execution created')
+
+ # get time1 before anything else, so the setup time is counted
+ test_exec.start_time = time1
+
+ # get challenge definition instance, and start challenge
+ challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)
+ challenge_def.run_start_challenge_code()
+
+ # memorize challenge start time
+ chall_exec.start_time = datetime.now()
+ test_exec.challenge_start_time = chall_exec.start_time
+
+ # call specific test definition code, via table of functions; this code should monitor a VNF and return when restoration is observed
test_code_index = self.test_code_ID - 1 # lists are indexed from 0 to N-1
- self.test_code_list[test_code_index]() # invoke corresponding method, via index
+ self.test_code_list[test_code_index]() # invoke corresponding method, via index; could check for return code
+
+ # memorize restoration detection time and compute recovery time
+ test_exec.restoration_detection_time = datetime.now()
+ recovery_time_metric_def = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS) # get Recovery Time metric definition: ID=1
+ test_exec.recovery_time = recovery_time_metric_def.compute(test_exec.challenge_start_time, test_exec.restoration_detection_time)
+
+ # stop challenge
+ challenge_def.run_stop_challenge_code()
+
+ # memorize challenge stop time
+ chall_exec.stop_time = datetime.now()
+ chall_exec.log.append_to_list('challenge execution finished')
+
+ # write results to CSV files, memorize test finish time
+ chall_exec.write_to_csv()
+ test_exec.finish_time = datetime.now()
+ test_exec.log.append_to_list('test execution finished')
+ test_exec.write_to_csv()
+
+
except Exception as e:
print(type(e), e)
sys.exit()
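+        # Note: the Recovery Time computation invoked above amounts to a timestamp
+        # difference, e.g. (values reused from the self-test in main() below):
+        #   t1 = datetime(2018, 7, 1, 15, 10, 12, 500000)   # challenge start
+        #   t2 = datetime(2018, 7, 1, 15, 13, 43, 200000)   # restoration detected
+        #   (t2 - t1).total_seconds()                       # -> 210.7 seconds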
@@ -350,13 +402,10 @@ class TestDefinition(AutoBaseObject):
"""Test case code number 005."""
print("This is test_code005 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
- # here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,
- # monitoring of VNF, trigger stop code from challenge def, perform restoration of VNF
- challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)
- if challenge_def != None:
- challenge_def.run_start_challenge_code()
- challenge_def.run_stop_challenge_code()
-
+ # specific VNF recovery monitoring, specific metrics if any
+ # interact with ONAP, periodic query about VNF status; may also check VM or container status directly with VIM
+ # return when VNF is recovered
+ # may provision for failure to recover (max time to wait; return code: recovery OK boolean)
def test_code006(self):
"""Test case code number 006."""
@@ -437,9 +486,9 @@ def init_test_definitions():
test_definitions = []
# add info to list in memory, one by one, following signature values
- test_def_ID = 1
+ test_def_ID = 5
test_def_name = "VM failure impact on virtual firewall (vFW VNF)"
- test_def_challengeDefID = 1
+ test_def_challengeDefID = 5
test_def_testCaseID = 5
test_def_VNFIDs = [1]
test_def_associatedMetricsIDs = [2]
@@ -466,14 +515,20 @@ def init_test_definitions():
######################################################################
class ChallengeType(Enum):
- # server-level failures
+ # physical server-level failures 1XX
COMPUTE_HOST_FAILURE = 100
DISK_FAILURE = 101
LINK_FAILURE = 102
NIC_FAILURE = 103
- # network-level failures
- OVS_BRIDGE_FAILURE = 200
- # security stresses
+
+ # cloud-level failures 2XX
+ CLOUD_COMPUTE_FAILURE = 200
+ SDN_C_FAILURE = 201
+ OVS_BRIDGE_FAILURE = 202
+ CLOUD_STORAGE_FAILURE = 203
+ CLOUD_NETWORK_FAILURE = 204
+
+ # security stresses 3XX
HOST_TAMPERING = 300
HOST_INTRUSION = 301
NETWORK_INTRUSION = 302
@@ -619,9 +674,26 @@ class ChallengeDefinition(AutoBaseObject):
def start_challenge_code005(self):
"""Start Challenge code number 005."""
print("This is start_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
+ # challenge #5, related to test case #5, i.e. test def #5
+ # cloud reference (name and region) should be in clouds.yaml file
+ # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')
+ # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef
+ # ChallengeDef suspends/resumes VM
+ # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name
+ # conn.compute.suspend_server(this server id)
+
+
def stop_challenge_code005(self):
"""Stop Challenge code number 005."""
print("This is stop_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
+ # challenge #5, related to test case #5, i.e. test def #5
+ # cloud reference (name and region) should be in clouds.yaml file
+ # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')
+ # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef
+ # ChallengeDef suspends/resumes VM
+ # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name
+        # conn.compute.resume_server(this server id)
+
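+        # A concrete sketch of these steps (cloud/region names are placeholders that
+        # must exist in clouds.yaml; vm_id would come from the ONAP VNF->VM mapping):
+        #   conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')
+        #   server = conn.compute.find_server(vm_id)
+        #   conn.compute.resume_server(server)    # asynchronous; poll get_server() until 'ACTIVE'
+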
def start_challenge_code006(self):
"""Start Challenge code number 006."""
@@ -711,9 +783,9 @@ def init_challenge_definitions():
challenge_defs = []
# add info to list in memory, one by one, following signature values
- chall_def_ID = 1
+ chall_def_ID = 5
chall_def_name = "VM failure"
- chall_def_challengeType = ChallengeType.COMPUTE_HOST_FAILURE
+ chall_def_challengeType = ChallengeType.CLOUD_COMPUTE_FAILURE
chall_def_recipientID = 1
chall_def_impactedCloudResourcesInfo = "OpenStack VM on ctl02 in Arm pod"
chall_def_impactedCloudResourceIDs = [2]
@@ -722,8 +794,10 @@ def init_challenge_definitions():
chall_def_startChallengeCLICommandSent = "service nova-compute stop"
chall_def_stopChallengeCLICommandSent = "service nova-compute restart"
# OpenStack VM Suspend vs. Pause: suspend stores the state of VM on disk while pause stores it in memory (RAM)
+ # in CLI:
# $ nova suspend NAME
# $ nova resume NAME
+ # but better use openstack SDK
chall_def_startChallengeAPICommandSent = []
chall_def_stopChallengeAPICommandSent = []
@@ -1575,7 +1649,7 @@ def main():
challgs = init_challenge_definitions()
print(challgs)
- chall = get_indexed_item_from_file(1,FILE_CHALLENGE_DEFINITIONS)
+ chall = get_indexed_item_from_file(5,FILE_CHALLENGE_DEFINITIONS)
print(chall)
chall.run_start_challenge_code()
chall.run_stop_challenge_code()
@@ -1584,7 +1658,7 @@ def main():
tds = init_test_definitions()
print(tds)
- td = get_indexed_item_from_file(1,FILE_TEST_DEFINITIONS)
+ td = get_indexed_item_from_file(5,FILE_TEST_DEFINITIONS)
print(td)
#td.printout_all(0)
#td.run_test_code()
@@ -1604,8 +1678,8 @@ def main():
metricdef = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS)
print(metricdef)
- t1 = datetime(2018,4,1,15,10,12,500000)
- t2 = datetime(2018,4,1,15,13,43,200000)
+ t1 = datetime(2018,7,1,15,10,12,500000)
+ t2 = datetime(2018,7,1,15,13,43,200000)
r1 = metricdef.compute(t1,t2)
print(r1)
print()
@@ -1646,7 +1720,7 @@ def main():
print()
- ce1 = ChallengeExecution(1,"essai challenge execution",1)
+ ce1 = ChallengeExecution(1,"essai challenge execution",5)
ce1.start_time = datetime.now()
ce1.log.append_to_list("challenge execution log event 1")
ce1.log.append_to_list("challenge execution log event 2")
@@ -1668,7 +1742,7 @@ def main():
print()
- te1 = TestExecution(1,"essai test execution",1,1,"Gerard")
+ te1 = TestExecution(1,"essai test execution",5,1,"Gerard")
te1.start_time = datetime.now()
te1.challenge_start_time = ce1.start_time # illustrate how to set test execution challenge start time
print("te1.challenge_start_time:",te1.challenge_start_time)
diff --git a/lib/auto/testcase/resiliency/clouds.yaml b/lib/auto/testcase/resiliency/clouds.yaml
index 593a07c..e6ec824 100644
--- a/lib/auto/testcase/resiliency/clouds.yaml
+++ b/lib/auto/testcase/resiliency/clouds.yaml
@@ -14,9 +14,9 @@ clouds:
armopenstack:
auth:
auth_url: https://10.10.50.103:5000/v2.0
+ project_name: admin
username: admin
password: opnfv_secret
- project_name: admin
region_name: RegionOne
# Openstack instance on LaaS hpe16, from OPNFV Euphrates, controller IP@ (mgt: 172.16.10.101; public: 10.16.0.101)
@@ -27,9 +27,9 @@ clouds:
hpe16openstackEuphrates:
auth:
auth_url: http://10.16.0.101:5000/v2.0
+ project_name: admin
username: admin
password: opnfv_secret
- project_name: admin
region_name: RegionOne
# Openstack instance on LaaS hpe16, from OPNFV Fraser, controller IP@ (mgt: 172.16.10.36; public: 10.16.0.107)
@@ -37,12 +37,16 @@ clouds:
# admin: http://172.16.10.36:35357/v3
# internal: http://172.16.10.36:5000/v3
# public: http://10.16.0.107:5000/v3
+ # Horizon: https://10.16.0.107:8078, but need SSH port forwarding through 10.10.100.26 to be reached from outside
+ # "If you are using Identity v3 you need to specify the user and the project domain name"
hpe16openstackFraser:
auth:
auth_url: http://10.16.0.107:5000/v3
+ project_name: admin
username: admin
password: opnfv_secret
- project_name: admin
+ user_domain_name: Default
+ project_domain_name: Default
region_name: RegionOne
# ubuntu@ctl01:~$ openstack project show admin
@@ -78,14 +82,28 @@ clouds:
# | name | heat_user_domain |
# +-------------+---------------------------------------------+
-export OS_AUTH_URL=http://10.16.0.107:5000/v3
-export OS_PROJECT_ID=04fcfe7aa83f4df79ae39ca748aa8637
-export OS_PROJECT_NAME="admin"
-export OS_USER_DOMAIN_NAME="Default"
-export OS_USERNAME="admin"
-export OS_PASSWORD="opnfv_secret"
-export OS_REGION_NAME="RegionOne"
-export OS_INTERFACE=public
-export OS_IDENTITY_API_VERSION=3
+# admin user (from Horizon on hpe16):
+# Domain ID default
+# Domain Name Default
+# User Name admin
+# Description None
+# ID df0ea50cfcff4bbfbfdfefccdb018834
+# Email root@localhost
+# Enabled Yes
+# Primary Project ID 04fcfe7aa83f4df79ae39ca748aa8637
+# Primary Project Name admin
+
+
+
+
+# export OS_AUTH_URL=http://10.16.0.107:5000/v3
+# export OS_PROJECT_ID=04fcfe7aa83f4df79ae39ca748aa8637
+# export OS_PROJECT_NAME="admin"
+# export OS_USER_DOMAIN_NAME="Default"
+# export OS_USERNAME="admin"
+# export OS_PASSWORD="opnfv_secret"
+# export OS_REGION_NAME="RegionOne"
+# export OS_INTERFACE=public
+# export OS_IDENTITY_API_VERSION=3
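+
+# With the v3 entry above, a client needs only the cloud name (a sketch, assuming
+# the openstack SDK is installed); openstack.connect() with no arguments can
+# instead pick up the OS_* environment variables listed above:
+#   import openstack
+#   conn = openstack.connect(cloud='hpe16openstackFraser')   # from clouds.yaml
+#   conn = openstack.connect()                               # from OS_* environment variables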