aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore7
-rw-r--r--INFO22
-rw-r--r--README.md39
-rw-r--r--docs/release/installation/UC01-feature.userguide.rst84
-rw-r--r--docs/release/installation/UC01-installation.instruction.rst212
-rw-r--r--docs/release/installation/UC02-feature.userguide.rst145
-rw-r--r--docs/release/installation/UC02-installation.instruction.rst195
-rw-r--r--docs/release/installation/UC03-feature.userguide.rst100
-rw-r--r--docs/release/installation/UC03-installation.instruction.rst212
-rw-r--r--docs/release/installation/index.rst15
-rw-r--r--docs/release/release-notes/Auto-release-notes.rst245
-rw-r--r--docs/release/release-notes/auto-proj-rn01.pngbin0 -> 115670 bytes
-rw-r--r--docs/release/release-notes/index.rst15
-rw-r--r--docs/release/userguide/UC01-feature.userguide.rst75
-rw-r--r--docs/release/userguide/UC02-feature.userguide.rst134
-rw-r--r--docs/release/userguide/UC03-feature.userguide.rst79
-rw-r--r--docs/release/userguide/auto-UC02-control-loop-flow.pngbin0 -> 74976 bytes
-rw-r--r--docs/release/userguide/auto-UC02-data1.jpgbin0 -> 122920 bytes
-rw-r--r--docs/release/userguide/auto-UC02-data2.jpgbin0 -> 378585 bytes
-rw-r--r--docs/release/userguide/auto-UC02-data3.jpgbin0 -> 462367 bytes
-rw-r--r--docs/release/userguide/auto-UC02-module1.jpgbin0 -> 156059 bytes
-rw-r--r--docs/release/userguide/auto-UC02-module2.jpgbin0 -> 43610 bytes
-rw-r--r--docs/release/userguide/auto-UC02-pattern.jpgbin0 -> 296889 bytes
-rw-r--r--docs/release/userguide/auto-UC02-preparation.jpgbin0 -> 297095 bytes
-rw-r--r--docs/release/userguide/auto-UC02-testcases.jpgbin0 -> 219582 bytes
-rw-r--r--docs/release/userguide/index.rst27
-rw-r--r--lib/auto/__init__.py0
-rw-r--r--lib/auto/testcase/resiliency/AutoResilGlobal.py51
-rw-r--r--lib/auto/testcase/resiliency/AutoResilItfCloud.py227
-rw-r--r--lib/auto/testcase/resiliency/AutoResilItfOS.py43
-rw-r--r--lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py42
-rw-r--r--lib/auto/testcase/resiliency/AutoResilMain.py188
-rw-r--r--lib/auto/testcase/resiliency/AutoResilMgTestDef.py1723
-rw-r--r--lib/auto/testcase/resiliency/AutoResilRunTest.py59
-rw-r--r--lib/auto/testcase/resiliency/clouds.yaml91
-rw-r--r--lib/auto/testcase/vnf/vbng/MANIFEST.json17
-rw-r--r--lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env35
-rw-r--r--lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml288
-rw-r--r--lib/auto/testcase/vnf/vbrgemu/MANIFEST.json17
-rw-r--r--lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env28
-rw-r--r--lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml253
-rw-r--r--lib/auto/testcase/vnf/vgmux/MANIFEST.json17
-rw-r--r--lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env35
-rw-r--r--lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml281
-rw-r--r--lib/auto/testcase/vnf/vgw/MANIFEST.json17
-rw-r--r--lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env32
-rw-r--r--lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml261
-rw-r--r--lib/auto/util/__init__.py0
-rw-r--r--lib/auto/util/openstack_lib.py332
-rw-r--r--lib/auto/util/util.py86
-rw-r--r--lib/auto/util/yaml_type.py12
-rwxr-xr-xprepare.sh24
-rw-r--r--requirements.txt8
-rw-r--r--setup.py29
-rw-r--r--setup/onap_on_openstack/__init__.py0
-rw-r--r--setup/onap_on_openstack/config.yml64
-rw-r--r--setup/onap_on_openstack/launch_onap.py39
-rw-r--r--setup/onap_on_openstack/onap_os_builder.py151
-rw-r--r--vcpe_spinup.sh99
-rw-r--r--vfw_spinup.sh53
-rw-r--r--vpn_subscribe.sh220
-rw-r--r--vpn_unsubscribe.sh220
62 files changed, 6638 insertions, 10 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..acb5d9d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+*.swp
+*.pyc
+/venv
+/work
+/lib/auto.egg-info
+/build
+/dist
diff --git a/INFO b/INFO
index c9a2d74..90b3dfc 100644
--- a/INFO
+++ b/INFO
@@ -11,14 +11,16 @@ IRC: Server:freenode.net Channel:#opnfv-auto
Repository: auto
Committers:
-tina.tsou@arm.com
-huangxiangyu5@huawei.com
-song.zhu@arm.com
-prasad.gorja@nxp.com
-oul.gd@chinatelecom.cn
-chenlei@caict.ac.cn
-wxy_cttl@126.com
-luxu_hd@163.com
-msambashivaiah@mvista.com
+Tina Tsou (tina.tsou@arm.com)
+Harry Huang (huangxiangyu5@huawei.com)
+Song Zhu (song.zhu@arm.com)
+Prasad Gorja (prasad.gorja@nxp.com)
+Liang Ou (oul.gd@chinatelecom.cn)
+Lei Chen (chenlei@caict.ac.cn)
+Xiaoyu Wang (wxy_cttl@126.com)
+Xu Lu (luxu_hd@163.com)
+Eric Maye (eric.dmaye@wipro.com)
+Chen Zhang (zhangchen.bri@chinatelecom.cn)
+Mohankumar Navaneethan (mnavaneethan@mvista.com)
-Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html
+Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..21e6bc9
--- /dev/null
+++ b/README.md
@@ -0,0 +1,39 @@
+Auto
+====
+
+#### Recent Changes ####
+- Add util modules for common use in project
+- Add scripts to setup ONAP (Currently only on OpenStack)
+
+
+#### Current Code Structure ####
+
+ ├── auto # Auto modules
+ │   ├── __init__.py
+ │   └── util # util modules
+ │   ├── __init__.py
+ │   ├── openstack_lib.py
+ │   ├── util.py
+ │   └── yaml_type.py
+ ├── prepare.sh # prepare virtual env, install Auto modules
+ ├── requirements.txt
+ ├── setup # scripts to setup ONAP
+ │   └── onap_on_openstack # set ONAP on OpenStack using heat
+ │   ├── config.yml
+ │   ├── __init__.py
+ │   ├── launch_onap.py
+ │   └── onap_os_builder.py
+ └── setup.py # setup Auto modules
+
+#### Setup ONAP ####
+A working ONAP environment is required before other test activity aiming for ONAP can be carried out.
+
+**Usage**:
+
+1. run command:
+
+ bash prepare.sh
+2. configure setup/onap_on_openstack/config.yml
+3. under setup/onap_on_openstack/ run command:
+
+ python launch_onap.py -c config.yml
diff --git a/docs/release/installation/UC01-feature.userguide.rst b/docs/release/installation/UC01-feature.userguide.rst
new file mode 100644
index 0000000..5da0865
--- /dev/null
+++ b/docs/release/installation/UC01-feature.userguide.rst
@@ -0,0 +1,84 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) optionally add copyright holder's name
+
+
+================================================================
+Auto User Guide: Use Case 1 Edge Cloud
+================================================================
+
+This document provides the user guide for Fraser release of Auto,
+specifically for Use Case 1: Edge Cloud.
+
+.. contents::
+ :depth: 3
+ :local:
+
+
+Description
+===========
+
+This use case aims at showcasing the benefits of using ONAP for autonomous Edge Cloud management.
+
+A high level of automation of VNF lifecycle event handling after launch is enabled by ONAP policies
+and closed-loop controls, which take care of most lifecycle events (start, stop, scale up/down/in/out,
+recovery/migration for HA) as well as their monitoring and SLA management.
+
+Multiple types of VNFs, for different execution environments, are first approved in the catalog thanks
+to the onboarding process, and then can be deployed and handled by multiple controllers in a systematic way.
+
+This results in management efficiency (lower control/automation overhead) and high degree of autonomy.
+
+
+Preconditions:
+#. hardware environment in which Edge cloud may be deployed
+#. an Edge cloud has been deployed and is ready for operation
+#. ONAP has been deployed onto a Cloud, and is interfaced (i.e. provisioned for API access) to the Edge cloud
+
+
+
+Main Success Scenarios:
+
+* lifecycle management - start, stop, scale (dependent upon telemetry)
+
+* recovering from faults (detect, determine appropriate response, act); i.e. exercise closed-loop policy engine in ONAP
+
+ * verify mechanics of control plane interaction
+
+* collection of telemetry for machine learning
+
+
+Details on the test cases corresponding to this use case:
+
+* Environment check
+
+ * Basic environment check: Create test script to check basic VIM (OpenStack), ONAP, and VNF are up and running
+
+* VNF lifecycle management
+
+ * VNF Instance Management: Validation of VNF Instance Management which includes VNF instantiation, VNF State Management and termination
+
+ * Tacker Monitoring Driver (VNFMonitorPing):
+
+ * Write Tacker Monitor driver to handle monitor_call and based on return state value create custom events
+ * If Ping to VNF fails, trigger below events
+
+ * Event 1 : Collect failure logs from VNF
+ * Event 2 : Soft restart/respawn the VNF
+
+ * Integrate with Telemetry
+
+ * Create TOSCA template policies to implement ceilometer data collection service
+ * Collect CPU utilization data, compare with threshold, and perform action accordingly (respawn, scale-in/scale-out)
+
+
+
+Test execution high-level description
+=====================================
+
+<TBC>
+
+
+
+
diff --git a/docs/release/installation/UC01-installation.instruction.rst b/docs/release/installation/UC01-installation.instruction.rst
new file mode 100644
index 0000000..9ecb8bd
--- /dev/null
+++ b/docs/release/installation/UC01-installation.instruction.rst
@@ -0,0 +1,212 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) optionally add copyright holder's name
+
+========
+Abstract
+========
+
+This document describes how to install OPNFV Auto Use Case 1: Edge Cloud, its dependencies and required system resources.
+
+.. contents::
+ :depth: 3
+ :local:
+
+Version history
+---------------------
+
++--------------------+--------------------+--------------------+--------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-04-14 | 0.1.0 | Jonas Bjurel | First draft |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| | 0.1.1 | | |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| | 1.0 | | |
+| | | | |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+
+
+Introduction
+============
+<INTRODUCTION TO THE SCOPE AND INTENTION OF THIS DOCUMENT AS WELL AS TO THE SYSTEM TO BE INSTALLED>
+
+<EXAMPLE>:
+
+This document describes the supported software and hardware configurations for the
+Fuel OPNFV reference platform as well as providing guidelines on how to install and
+configure such reference system.
+
+Although the available installation options give a high degree of freedom in how the system is set up,
+with what architecture, services and features, etc., not nearly all of those permutations provide
+an OPNFV compliant reference architecture. Following the guidelines in this document ensures
+a result that is OPNFV compliant.
+
+The audience of this document is assumed to have good knowledge in network and Unix/Linux administration.
+
+
+Preface
+=======
+<DESCRIBE NEEDED PREREQUISITES, PLANNING, ETC.>
+
+<EXAMPLE>:
+
+Before starting the installation of Fuel@OPNFV, some planning must precede.
+
+First of all, the Fuel@OPNFV .iso image needs to be retrieved,
+the Latest stable Arno release of Fuel@OPNFV can be found here: <www.opnfv.org/abc/def>
+
+Alternatively, you may build the .iso from source by cloning the opnfv/genesis git repository:
+<git clone https://<linux foundation uid>@gerrit.opnf.org/gerrit/genesis>
+Check-out the Arno release:
+<cd genesis; git checkout arno>
+Go to the fuel directory and build the .iso
+<cd fuel/build; make all>
+
+Familiarize yourself with the Fuel 6.0.1 version by reading the following documents:
+- abc <http://wiki.openstack.org/abc>
+- def <http://wiki.openstack.org/def>
+- ghi <http://wiki.openstack.org/ghi>
+
+Secondly, a number of deployment specific parameters must be collected, those are:
+
+1. Provider sub-net and gateway information
+
+2. Provider VLAN information
+
+3. Provider DNS addresses
+
+4. Provider NTP addresses
+
+This information will be needed for the configuration procedures provided in this document.
+
+
+Hardware requirements
+=====================
+<PROVIDE A LIST OF MINIMUM HARDWARE REQUIREMENTS NEEDED FOR THE INSTALL>
+
+<EXAMPLE>:
+
+Following minimum hardware requirements must be met for installation of Fuel@OPNFV:
+
++--------------------+----------------------------------------------------+
+| **HW Aspect** | **Requirement** |
+| | |
++--------------------+----------------------------------------------------+
+| **# of servers** | Minimum 5 (3 for non redundant deployment) |
+| | 1 Fuel deployment master (may be virtualized) |
+| | 3(1) Controllers |
+| | 1 Compute |
++--------------------+----------------------------------------------------+
+| **CPU** | Minimum 1 socket x86_AMD64 Ivy bridge 1.6 GHz |
+| | |
++--------------------+----------------------------------------------------+
+| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
+| | |
++--------------------+----------------------------------------------------+
+| **Disk** | Minimum 256GB 10kRPM spinning disks |
+| | |
++--------------------+----------------------------------------------------+
+| **NICs** | 2(1)x10GE Niantec for Private/Public (Redundant) |
+| | |
+| | 2(1)x10GE Niantec for SAN (Redundant) |
+| | |
+| | 2(1)x1GE for admin (PXE) and control (RabitMQ,etc) |
+| | |
++--------------------+----------------------------------------------------+
+
+
+Top of the rack (TOR) Configuration requirements
+================================================
+<DESCRIBE NEEDED NETWORK TOPOLOGY SETUP IN THE TORs>
+
+<EXAMPLE>:
+
+The switching infrastructure provides connectivity for the OPNFV infra-structure operations as well as
+for the tenant networks (East/West) and provider connectivity (North/South bound connectivity).
+The switching connectivity can (but does not need to) be fully redundant,
+in which case it comprises a redundant 10GE switch pair for "Traffic/Payload/SAN" purposes as well as
+a 1GE switch pair for "infrastructure control-, management and administration"
+
+The switches are **not** automatically configured from the OPNFV reference platform.
+All the networks involved in the OPNFV infra-structure as well as the provider networks
+and the private tenant VLANs needs to be manually configured.
+
+This following sections guides through required black-box switch configurations.
+
+VLAN considerations and blue-print
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+IP Address plan considerations and blue-print
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+OPNFV Software installation and deployment
+==========================================
+<DESCRIBE THE FULL PROCEDURES FOR THE INSTALLATION OF THE OPNFV COMPONENT INSTALLATION AND DEPLOYMENT>
+
+<EXAMPLE>:
+
+This section describes the installation of the Fuel@OPNFV installation server (Fuel master)
+as well as the deployment of the full OPNFV reference platform stack across a server cluster.
+Etc.
+
+Install Fuel master
+^^^^^^^^^^^^^^^^^^^^^
+
+Create an OPNFV (Fuel Environment)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Configure the OPNFV environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Deploy the OPNFV environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+Installation health-check
+=========================
+<DESCRIBE ANY MEANS TO VERIFY THE INTEGRITY AND HEALTHINESS OF THE INSTALL>
+
+<EXAMPLE>:
+
+Now that the OPNFV environment has been created, and before the post installation configurations is started,
+perform a system health check from the Fuel GUI:
+
+- Select the "Health check" TAB.
+- Select all test-cases
+- And click "Run tests"
+
+All test cases except the following should pass:
+
+Post installation and deployment actions
+------------------------------------------
+<DESCRIBE ANY POST INSTALLATION ACTIONS/CONFIGURATIONS NEEDED>
+
+<EXAMPLE>:
+After the OPNFV deployment is completed, the following manual changes need to be performed in order
+for the system to work according to OPNFV standards.
+
+**Change host OS password:**
+Change the Host OS password by......
+
+
+References
+==========
+<PROVIDE NEEDED/USEFUL REFERENCES>
+
+<EXAMPLES>:
+
+OPNFV
+^^^^^^^^^^
+
+OpenStack
+^^^^^^^^^^^
+
+OpenDaylight
+^^^^^^^^^^^^^^^
diff --git a/docs/release/installation/UC02-feature.userguide.rst b/docs/release/installation/UC02-feature.userguide.rst
new file mode 100644
index 0000000..32a6df8
--- /dev/null
+++ b/docs/release/installation/UC02-feature.userguide.rst
@@ -0,0 +1,145 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) optionally add copyright holder's name
+
+
+================================================================
+Auto User Guide: Use Case 2 Resiliency Improvements Through ONAP
+================================================================
+
+This document provides the user guide for Fraser release of Auto,
+specifically for Use Case 2: Resiliency Improvements Through ONAP.
+
+.. contents::
+ :depth: 3
+ :local:
+
+
+Description
+===========
+
+This use case illustrates VNF failure recovery time reduction with ONAP, thanks to its automated monitoring and management.
+It simulates an underlying problem (failure, stress, etc.: any adverse condition in the network that can impact VNFs),
+tracks a VNF, and measures the amount of time it takes for ONAP to restore the VNF functionality.
+
+The benefit for NFV edge service providers is to assess what degree of added VIM+NFVI platform resilience for VNFs is obtained by
+leveraging ONAP closed-loop control, vs. VIM+NFVI self-managed resilience (which may not be aware of the VNF or the corresponding
+end-to-end Service, but only of underlying resources such as VMs and servers).
+
+
+Preconditions:
+
+#. hardware environment in which Edge cloud may be deployed
+#. Edge cloud has been deployed and is ready for operation
+#. ONAP has been deployed onto a cloud and is interfaced (i.e. provisioned for API access) to the Edge cloud
+#. Components of ONAP have been deployed on the Edge cloud as necessary for specific test objectives
+
+In future releases, Auto Use cases will also include the deployment of ONAP (if not already installed), the deployment
+of test VNFs (pre-existing VNFs in pre-existing ONAP can be used in the test as well), the configuration of ONAP for
+monitoring these VNFs (policies, CLAMP, DCAE), in addition to the test scripts which simulate a problem and measures recovery time.
+
+Different types of problems can be simulated, hence the identification of multiple test cases corresponding to this use case,
+as illustrated in this diagram:
+
+.. image:: auto-UC02-testcases.jpg
+
+Description of simulated problems/challenges:
+
+* Physical Infra Failure
+
+ * Migration upon host failure: Compute host power is interrupted, and affected workloads are migrated to other available hosts.
+ * Migration upon disk failure: Disk volumes are unmounted, and affected workloads are migrated to other available hosts.
+ * Migration upon link failure: Traffic on links is interrupted/corrupted, and affected workloads are migrated to other available hosts.
+ * Migration upon NIC failure: NIC ports are disabled by host commands, and affected workloads are migrated to other available hosts.
+
+* Virtual Infra Failure
+
+ * OpenStack compute host service fail: Core OpenStack service processes on compute hosts are terminated, and auto-restored, or affected workloads are migrated to other available hosts.
+ * SDNC service fail: Core SDNC service processes are terminated, and auto-restored.
+ * OVS fail: OVS bridges are disabled, and affected workloads are migrated to other available hosts.
+ * etc.
+
+* Security
+
+ * Host tampering: Host tampering is detected, the host is fenced, and affected workloads are migrated to other available hosts.
+ * Host intrusion: Host intrusion attempts are detected, an offending workload, device, or flow is identified and fenced, and as needed affected workloads are migrated to other available hosts.
+ * Network intrusion: Network intrusion attempts are detected, and an offending flow is identified and fenced.
+
+
+
+
+Test execution high-level description
+=====================================
+
+The following two MSCs (Message Sequence Charts) show the actors and high-level interactions.
+
+The first MSC shows the preparation activities (assuming the hardware, network, cloud, and ONAP have already been installed):
+onboarding and deployment of VNFs (via ONAP portal and modules in sequence: SDC, VID, SO), and ONAP configuration
+(policy framework, closed-loops in CLAMP, activation of DCAE).
+
+.. image:: auto-UC02-preparation.jpg
+
+The second MSC illustrates the pattern of all test cases for the Resiliency Improvements:
+* simulate the chosen problem (a.k.a. a "Challenge") for this test case, for example suspend a VM which may be used by a VNF
+* start tracking the target VNF of this test case
+* measure the ONAP-orchestrated VNF Recovery Time
+* then the test stops simulating the problem (for example: resume the VM that was suspended),
+
+In parallel, the MSC also shows the sequence of events happening in ONAP, thanks to its configuration to provide Service
+Assurance for the VNF.
+
+.. image:: auto-UC02-pattern.jpg
+
+
+Test design: data model, implementation modules
+===============================================
+
+The high-level design of classes shows the identification of several entities:
+* Test Case: as identified above, each is a special case of the overall use case (e.g., categorized by challenge type)
+* Test Definition: gathers all the information necessary to run a certain test case
+* Metric Definition: describes a certain metric that may be measured, in addition to Recovery Time
+* Challenge Definition: describe the challenge (problem, failure, stress, ...) simulated by the test case
+* Recipient: entity that can receive commands and send responses, and that is queried by the Test Definition or Challenge Definition
+(a recipient would be typically a management service, with interfaces (CLI or API) for clients to query)
+* Resources: with 3 types (VNF, cloud virtual resource such as a VM, physical resource such as a server)
+
+Three of these entities have execution-time corresponding classes:
+* Test Execution, which captures all the relevant data of the execution of a Test Definition
+* Challenge Execution, which captures all the relevant data of the execution of a Challenge Definition
+* Metric Value, which captures a quantitative measurement of a Metric Definition (with a timestamp)
+
+.. image:: auto-UC02-data1.jpg
+
+The following diagram illustrates an implementation-independent design of the attributes of these entities:
+.. image:: auto-UC02-data2.jpg
+
+This next diagram shows the Python classes and attributes, as implemented by this Use Case (for all test cases):
+
+.. image:: auto-UC02-data3.jpg
+
+Test definition data is stored in serialization files (Python pickles), while test execution data is stored in CSV
+files, for easier post-analysis.
+
+The module design is straightforward: functions and classes for managing data, for interfacing with recipients,
+for executing tests, and for interacting with the test user (choosing a Test Definition, showing the details
+of a Test Definition, starting the execution).
+
+.. image:: auto-UC02-module1.jpg
+
+This last diagram shows the test user menu functions:
+
+.. image:: auto-UC02-module2.jpg
+
+In future releases of Auto, testing environments such as FuncTest and Yardstick might be leveraged.
+
+Also, anonymized test results could be collected from users willing to share them, and aggregates could be
+maintained as benchmarks.
+
+
+
+
+
+
+
+
diff --git a/docs/release/installation/UC02-installation.instruction.rst b/docs/release/installation/UC02-installation.instruction.rst
new file mode 100644
index 0000000..0e126dd
--- /dev/null
+++ b/docs/release/installation/UC02-installation.instruction.rst
@@ -0,0 +1,195 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) optionally add copyright holder's name
+
+========
+Abstract
+========
+
+This document describes how to install OPNFV Auto Use Case 2: Resiliency Improvements Through ONAP, its dependencies and required system resources.
+
+.. contents::
+ :depth: 3
+ :local:
+
+
+
+Introduction
+============
+<INTRODUCTION TO THE SCOPE AND INTENTION OF THIS DOCUMENT AS WELL AS TO THE SYSTEM TO BE INSTALLED>
+
+<EXAMPLE>:
+
+This document describes the supported software and hardware configurations for the
+Fuel OPNFV reference platform as well as providing guidelines on how to install and
+configure such reference system.
+
+Although the available installation options give a high degree of freedom in how the system is set up,
+with what architecture, services and features, etc., not nearly all of those permutations provide
+an OPNFV compliant reference architecture. Following the guidelines in this document ensures
+a result that is OPNFV compliant.
+
+The audience of this document is assumed to have good knowledge in network and Unix/Linux administration.
+
+
+Preface
+=======
+<DESCRIBE NEEDED PREREQUISITES, PLANNING, ETC.>
+
+<EXAMPLE>:
+
+Before starting the installation of Fuel@OPNFV, some planning must precede.
+
+First of all, the Fuel@OPNFV .iso image needs to be retrieved,
+the Latest stable Arno release of Fuel@OPNFV can be found here: <www.opnfv.org/abc/def>
+
+Alternatively, you may build the .iso from source by cloning the opnfv/genesis git repository:
+<git clone https://<linux foundation uid>@gerrit.opnf.org/gerrit/genesis>
+Check-out the Arno release:
+<cd genesis; git checkout arno>
+Go to the fuel directory and build the .iso
+<cd fuel/build; make all>
+
+Familiarize yourself with the Fuel 6.0.1 version by reading the following documents:
+- abc <http://wiki.openstack.org/abc>
+- def <http://wiki.openstack.org/def>
+- ghi <http://wiki.openstack.org/ghi>
+
+Secondly, a number of deployment specific parameters must be collected, those are:
+
+1. Provider sub-net and gateway information
+
+2. Provider VLAN information
+
+3. Provider DNS addresses
+
+4. Provider NTP addresses
+
+This information will be needed for the configuration procedures provided in this document.
+
+
+Hardware requirements
+=====================
+<PROVIDE A LIST OF MINIMUM HARDWARE REQUIREMENTS NEEDED FOR THE INSTALL>
+
+<EXAMPLE>:
+
+Following minimum hardware requirements must be met for installation of Fuel@OPNFV:
+
++--------------------+----------------------------------------------------+
+| **HW Aspect** | **Requirement** |
+| | |
++--------------------+----------------------------------------------------+
+| **# of servers** | Minimum 5 (3 for non redundant deployment) |
+| | 1 Fuel deployment master (may be virtualized) |
+| | 3(1) Controllers |
+| | 1 Compute |
++--------------------+----------------------------------------------------+
+| **CPU** | Minimum 1 socket x86_AMD64 Ivy bridge 1.6 GHz |
+| | |
++--------------------+----------------------------------------------------+
+| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
+| | |
++--------------------+----------------------------------------------------+
+| **Disk** | Minimum 256GB 10kRPM spinning disks |
+| | |
++--------------------+----------------------------------------------------+
+| **NICs** | 2(1)x10GE Niantec for Private/Public (Redundant) |
+| | |
+| | 2(1)x10GE Niantec for SAN (Redundant) |
+| | |
+| | 2(1)x1GE for admin (PXE) and control (RabitMQ,etc) |
+| | |
++--------------------+----------------------------------------------------+
+
+
+Top of the rack (TOR) Configuration requirements
+================================================
+<DESCRIBE NEEDED NETWORK TOPOLOGY SETUP IN THE TORs>
+
+<EXAMPLE>:
+
+The switching infrastructure provides connectivity for the OPNFV infra-structure operations as well as
+for the tenant networks (East/West) and provider connectivity (North/South bound connectivity).
+The switching connectivity can (but does not need to) be fully redundant,
+in which case it comprises a redundant 10GE switch pair for "Traffic/Payload/SAN" purposes as well as
+a 1GE switch pair for "infrastructure control-, management and administration"
+
+The switches are **not** automatically configured from the OPNFV reference platform.
+All the networks involved in the OPNFV infra-structure as well as the provider networks
+and the private tenant VLANs needs to be manually configured.
+
+This following sections guides through required black-box switch configurations.
+
+VLAN considerations and blue-print
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+IP Address plan considerations and blue-print
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+OPNFV Software installation and deployment
+==========================================
+<DESCRIBE THE FULL PROCEDURES FOR THE INSTALLATION OF THE OPNFV COMPONENT INSTALLATION AND DEPLOYMENT>
+
+<EXAMPLE>:
+
+This section describes the installation of the Fuel@OPNFV installation server (Fuel master)
+as well as the deployment of the full OPNFV reference platform stack across a server cluster.
+Etc.
+
+Install Fuel master
+^^^^^^^^^^^^^^^^^^^^^
+
+Create an OPNFV (Fuel Environment)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Configure the OPNFV environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Deploy the OPNFV environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+Installation health-check
+=========================
+<DESCRIBE ANY MEANS TO VERIFY THE INTEGRITY AND HEALTHINESS OF THE INSTALL>
+
+<EXAMPLE>:
+
+Now that the OPNFV environment has been created, and before the post installation configurations is started,
+perform a system health check from the Fuel GUI:
+
+- Select the "Health check" TAB.
+- Select all test-cases
+- And click "Run tests"
+
+All test cases except the following should pass:
+
+Post installation and deployment actions
+------------------------------------------
+<DESCRIBE ANY POST INSTALLATION ACTIONS/CONFIGURATIONS NEEDED>
+
+<EXAMPLE>:
+After the OPNFV deployment is completed, the following manual changes need to be performed in order
+for the system to work according to OPNFV standards.
+
+**Change host OS password:**
+Change the Host OS password by......
+
+
+References
+==========
+<PROVIDE NEEDED/USEFUL REFERENCES>
+
+<EXAMPLES>:
+
+OPNFV
+^^^^^^^^^^
+
+OpenStack
+^^^^^^^^^^^
+
+OpenDaylight
+^^^^^^^^^^^^^^^
diff --git a/docs/release/installation/UC03-feature.userguide.rst b/docs/release/installation/UC03-feature.userguide.rst
new file mode 100644
index 0000000..354d052
--- /dev/null
+++ b/docs/release/installation/UC03-feature.userguide.rst
@@ -0,0 +1,100 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) optionally add copywriters name
+
+
+================================================================
+Auto User Guide: Use Case 3 Enterprise vCPE
+================================================================
+
+This document provides the user guide for Fraser release of Auto,
+specifically for Use Case 3: Enterprise vCPE.
+
+.. contents::
+ :depth: 3
+ :local:
+
+
+Description
+===========
+
+This Use Case shows how ONAP can help ensure that virtual CPEs (including vFW: virtual firewalls) in Edge Cloud are enterprise-grade.
+
+ONAP operations include a verification process for VNF onboarding (i.e. inclusion in the ONAP catalog),
+with multiple Roles (designer, tester, governor, operator), responsible for approving proposed VNFs
+(as VSPs (Vendor Software Products), and eventually as end-to-end Services).
+
+This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are only
+chosen from such an approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet
+enterprise-grade requirements. ONAP provides a NBI in addition to a standard portal, thus enabling
+a programmatic deployment of VNFs, still conforming to ONAP processes.
+
+Moreover, ONAP also comprises real-time monitoring (by the DCAE component), which monitors performance for SLAs,
+can adjust allocated resources accordingly (elastic adjustment at VNF level), and can ensure High Availability.
+
+DCAE executes directives coming from policies described in the Policy Framework, and closed-loop controls
+described in the CLAMP component.
+
+Finally, this automated approach also reduces costs, since repetitive actions are designed once and executed multiple times,
+as vCPEs are instantiated and decommissioned (frequent events, given the variability of business activity,
+and a Small Business market similar to the Residential market: many contract updates resulting in many vCPE changes).
+
+NFV edge service providers need to provide site2site, site2dc (Data Center) and site2internet services to tenants
+both efficiently and safely, by deploying such qualified enterprise-grade vCPE.
+
+
+Preconditions:
+
+#. hardware environment in which Edge cloud may be deployed
+#. an Edge cloud has been deployed and is ready for operation
+#. enterprise edge devices, such as ThinCPE, have access to the Edge cloud with WAN interfaces
+#. ONAP components (MSO, SDN-C, APP-C and VNFM) have been deployed onto a cloud and are interfaced (i.e. provisioned for API access) to the Edge cloud
+
+
+Main Success Scenarios:
+
+* VNF spin-up
+
+ * vCPE spin-up: MSO calls the VNFM to spin up a vCPE instance from the catalog and then updates the active VNF list
+ * vFW spin-up: MSO calls the VNFM to spin up a vFW instance from the catalog and then updates the active VNF list
+
+* site2site
+
+ * L3VPN service subscribing: MSO calls the SDNC to create VXLAN tunnels to carry L2 traffic between client's ThinCPE and SP's vCPE, and enables vCPE to route between different sites.
+  * L3VPN service unsubscribing: MSO calls the SDNC to destroy tunnels and routes, thus disabling traffic between different sites.
+
+
+See `ONAP description of vCPE use case <https://wiki.onap.org/display/DW/Use+Case+proposal%3A+Enterprise+vCPE>`_ for more details, including MSCs.
+
+
+Details on the test cases corresponding to this use case:
+
+* VNF Management
+
+ * Spin up a vCPE instance: Spin up a vCPE instance, by calling NBI of the orchestrator.
+ * Spin up a vFW instance: Spin up a vFW instance, by calling NBI of the orchestrator.
+
+* VPN as a Service
+
+  * Subscribe to a VPN service: Subscribe to a VPN service, by calling NBI of the orchestrator.
+ * Unsubscribe to a VPN service: Unsubscribe to a VPN service, by calling NBI of the orchestrator.
+
+* Internet as a Service
+
+ * Subscribe to an Internet service: Subscribe to an Internet service, by calling NBI of the orchestrator.
+ * Unsubscribe to an Internet service: Unsubscribe to an Internet service, by calling NBI of the orchestrator.
+
+
+Test execution high-level description
+=====================================
+
+<TBC>
+
+
+
+
+
+
+
+
+
diff --git a/docs/release/installation/UC03-installation.instruction.rst b/docs/release/installation/UC03-installation.instruction.rst
new file mode 100644
index 0000000..0221885
--- /dev/null
+++ b/docs/release/installation/UC03-installation.instruction.rst
@@ -0,0 +1,212 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) optionally add copywriters name
+
+========
+Abstract
+========
+
+This document describes how to install OPNFV Auto Use Case 3: Enterprise vCPE, its dependencies and required system resources.
+
+.. contents::
+ :depth: 3
+ :local:
+
+Version history
+---------------------
+
++--------------------+--------------------+--------------------+--------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-04-14 | 0.1.0 | Jonas Bjurel | First draft |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| | 0.1.1 | | |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| | 1.0 | | |
+| | | | |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+
+
+Introduction
+============
+<INTRODUCTION TO THE SCOPE AND INTENTION OF THIS DOCUMENT AS WELL AS TO THE SYSTEM TO BE INSTALLED>
+
+<EXAMPLE>:
+
+This document describes the supported software and hardware configurations for the
+Fuel OPNFV reference platform as well as providing guidelines on how to install and
+configure such reference system.
+
+Although the available installation options give a high degree of freedom in how the system is set-up,
+with what architecture, services and features, etc., not nearly all of those permutations provide
+an OPNFV compliant reference architecture. Following the guidelines in this document ensures
+a result that is OPNFV compliant.
+
+The audience of this document is assumed to have good knowledge in network and Unix/Linux administration.
+
+
+Preface
+=======
+<DESCRIBE NEEDED PREREQUISITES, PLANNING, ETC.>
+
+<EXAMPLE>:
+
+Before starting the installation of Fuel@OPNFV, some planning must precede.
+
+First of all, the Fuel@OPNFV .iso image needs to be retrieved,
+the Latest stable Arno release of Fuel@OPNFV can be found here: <www.opnfv.org/abc/def>
+
+Alternatively, you may build the .iso from source by cloning the opnfv/genesis git repository:
+<git clone https://<linux foundation uid>@gerrit.opnfv.org/gerrit/genesis>
+Check-out the Arno release:
+<cd genesis; git checkout arno>
+Goto the fuel directory and build the .iso
+<cd fuel/build; make all>
+
+Familiarize yourself with the Fuel 6.0.1 version by reading the following documents:
+- abc <http://wiki.openstack.org/abc>
+- def <http://wiki.openstack.org/def>
+- ghi <http://wiki.openstack.org/ghi>
+
+Secondly, a number of deployment specific parameters must be collected, those are:
+
+1. Provider sub-net and gateway information
+
+2. Provider VLAN information
+
+3. Provider DNS addresses
+
+4. Provider NTP addresses
+
+This information will be needed for the configuration procedures provided in this document.
+
+
+Hardware requirements
+=====================
+<PROVIDE A LIST OF MINIMUM HARDWARE REQUIREMENTS NEEDED FOR THE INSTALL>
+
+<EXAMPLE>:
+
+Following minimum hardware requirements must be met for installation of Fuel@OPNFV:
+
++--------------------+----------------------------------------------------+
+| **HW Aspect** | **Requirement** |
+| | |
++--------------------+----------------------------------------------------+
+| **# of servers** | Minimum 5 (3 for non redundant deployment) |
+| | 1 Fuel deployment master (may be virtualized) |
+| | 3(1) Controllers |
+| | 1 Compute |
++--------------------+----------------------------------------------------+
+| **CPU** | Minimum 1 socket x86_AMD64 Ivy bridge 1.6 GHz |
+| | |
++--------------------+----------------------------------------------------+
+| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
+| | |
++--------------------+----------------------------------------------------+
+| **Disk** | Minimum 256GB 10kRPM spinning disks |
+| | |
++--------------------+----------------------------------------------------+
+| **NICs** | 2(1)x10GE Niantec for Private/Public (Redundant) |
+| | |
+| | 2(1)x10GE Niantec for SAN (Redundant) |
+| | |
+|                    | 2(1)x1GE for admin (PXE) and control (RabbitMQ,etc)|
+| | |
++--------------------+----------------------------------------------------+
+
+
+Top of the rack (TOR) Configuration requirements
+================================================
+<DESCRIBE NEEDED NETWORK TOPOLOGY SETUP IN THE TORs>
+
+<EXAMPLE>:
+
+The switching infrastructure provides connectivity for the OPNFV infra-structure operations as well as
+for the tenant networks (East/West) and provider connectivity (North/South bound connectivity).
+The switching connectivity can (but does not need to) be fully redundant,
+in case it and comprises a redundant 10GE switch pair for "Traffic/Payload/SAN" purposes as well as
+a 1GE switch pair for "infrastructure control-, management and administration"
+
+The switches are **not** automatically configured from the OPNFV reference platform.
+All the networks involved in the OPNFV infra-structure as well as the provider networks
+and the private tenant VLANs needs to be manually configured.
+
+The following sections guide through required black-box switch configurations.
+
+VLAN considerations and blue-print
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+IP Address plan considerations and blue-print
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+OPNFV Software installation and deployment
+==========================================
+<DESCRIBE THE FULL PROCEDURES FOR THE OPNFV COMPONENT INSTALLATION AND DEPLOYMENT>
+
+<EXAMPLE>:
+
+This section describes the installation of the Fuel@OPNFV installation server (Fuel master)
+as well as the deployment of the full OPNFV reference platform stack across a server cluster.
+Etc.
+
+Install Fuel master
+^^^^^^^^^^^^^^^^^^^^^
+
+Create an OPNFV (Fuel Environment)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Configure the OPNFV environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Deploy the OPNFV environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+Installation health-check
+=========================
+<DESCRIBE ANY MEANS TO VERIFY THE INTEGRITY AND HEALTHINESS OF THE INSTALL>
+
+<EXAMPLE>:
+
+Now that the OPNFV environment has been created, and before the post installation configuration is started,
+perform a system health check from the Fuel GUI:
+
+- Select the "Health check" TAB.
+- Select all test-cases
+- And click "Run tests"
+
+All test cases except the following should pass:
+
+Post installation and deployment actions
+------------------------------------------
+<DESCRIBE ANY POST INSTALLATION ACTIONS/CONFIGURATIONS NEEDED>
+
+<EXAMPLE>:
+After the OPNFV deployment is completed, the following manual changes need to be performed in order
+for the system to work according to OPNFV standards.
+
+**Change host OS password:**
+Change the Host OS password by......
+
+
+References
+==========
+<PROVIDE NEEDED/USEFUL REFERENCES>
+
+<EXAMPLES>:
+
+OPNFV
+^^^^^^^^^^
+
+OpenStack
+^^^^^^^^^^^
+
+OpenDaylight
+^^^^^^^^^^^^^^^
diff --git a/docs/release/installation/index.rst b/docs/release/installation/index.rst
new file mode 100644
index 0000000..0120e92
--- /dev/null
+++ b/docs/release/installation/index.rst
@@ -0,0 +1,15 @@
+.. _auto-configguide:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+=====================================================
+OPNFV Auto (ONAP-Automated OPNFV) Configuration Guide
+=====================================================
+
+.. toctree::
+ :maxdepth: 1
+
+ UC01-installation.instruction.rst
+ UC02-installation.instruction.rst
+ UC03-installation.instruction.rst
diff --git a/docs/release/release-notes/Auto-release-notes.rst b/docs/release/release-notes/Auto-release-notes.rst
new file mode 100644
index 0000000..84665cd
--- /dev/null
+++ b/docs/release/release-notes/Auto-release-notes.rst
@@ -0,0 +1,245 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+==================
+Auto Release Notes
+==================
+
+This document provides the release notes for Fraser release of Auto.
+
+
+Important notes
+===============
+
+Initial release (project inception: July 2017).
+
+
+Summary
+=======
+
+OPNFV is an SDNFV system integration project for open-source components, which so far have been mostly limited to the NFVI+VIM as generally described by ETSI.
+
+In particular, OPNFV has yet to integrate higher-level automation features for VNFs and end-to-end Services.
+
+Auto ("ONAP-Automated OPNFV") will focus on ONAP component integration and verification with OPNFV reference platforms/scenarios, through primarily a post-install process in order to avoid impact to OPNFV installer projects. As much as possible, this will use a generic installation/integration process (not specific to any OPNFV installer's technology).
+
+* `ONAP <https://www.onap.org/>`_ (a Linux Foundation Project) is an open source software platform that delivers robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of Software-Defined Networks (SDNs).
+
+While all of ONAP is in scope, as it proceeds, the project will focus on specific aspects of this integration and verification in each release. Some example topics and work items include:
+
+* How ONAP meets VNFM standards, and interacts with VNFs from different vendors
+* How ONAP SDN-C uses OPNFV existing features, e.g. NetReady, in a two-layer controller architecture in which the upper layer (global controller) is replaceable, and the lower layer can use different vendor’s local controller to interact with SDN-C
+* What data collection interface VNF and controllers provide to ONAP DCAE, and (through DCAE), to closed-loop control functions such as Policy
+* Tests which verify interoperability of ONAP automation/lifecycle features with specific NFVI and VIM features, as prioritized by the project with technical community and EUAG input. Examples include:
+
+ * Abstraction of networking tech/features e.g. through NetReady/Gluon
+ * Blueprint-based VNF deployment (HOT, TOSCA, YANG)
+ * Application level configuration and lifecycle through YANG (for any aspects depending upon OPNFV NFVI+VIM components)
+ * Policy (through DCAE)
+ * Telemetry (through VES/DCAE)
+
+Initial areas of focus for Auto (in orange dotted lines; this scope can be expanded for future releases). It is understood that:
+
+* ONAP scope extends beyond the lines drawn below
+* ONAP architecture does not necessarily align with the ETSI NFV inspired diagrams this is based upon
+
+.. image:: auto-proj-rn01.png
+
+
+Testability:
+
+* Tests will be developed for use cases within the project scope.
+* In future releases, tests will be added to Functest runs for supporting scenarios.
+
+Auto’s goals include the standup and tests for integrated ONAP-Cloud platforms (“Cloud” here being OPNFV “scenarios” or other cloud environments). Thus, the artifacts would be tools to deploy ONAP (leveraging OOM whenever possible (starting with Beijing release of ONAP), and a preference for the containerized version of ONAP), to integrate it with clouds, to onboard and deploy test VNFs, to configure policies and closed-loop controls, and to run use-case defined tests against that integrated environment. OPNFV scenarios would be a possible component in the above.
+
+Auto currently defines three use cases: Edge Cloud, Resiliency Improvements, and Enterprise vCPE. These use cases aim to show:
+
+* increased autonomy of Edge Cloud management (automation, catalog-based deployment)
+* increased resilience (i.e. fast VNF recovery in case of failure or problem, thanks to closed-loop control)
+* enterprise-grade performance of vCPEs (certification during onboarding, then real-time performance assurance with SLAs and HA).
+
+The use cases define test cases, which initially will be independent, but which might eventually be integrated to FuncTest.
+
+Additional use cases can be added in the future, such as vIMS (example: project Clearwater).
+
+Target architectures include x86 and Arm.
+
+An ONAP instance (without DCAE) has been installed over Kubernetes on bare metal on an x86 pod of 6 servers at UNH IOL.
+Onboarding of 2 VNFs is in progress: a vCPE and a vFW.
+
+Integration with Arm servers has started (exploring binary compatibility):
+
+* Openstack is currently installed on a 6-server pod of Arm servers
+* a Kubernetes cluster is installed there as well, for another instance of ONAP on Arm servers
+* An additional set of 14 Arm servers is in the process of being deployed at UNH, for increased capacity
+* LaaS (Lab as a Service) resources are also used (hpe16, hpe17, hpe19)
+
+Test case implementation for the three use cases has started.
+
+
+Release Data
+============
+
++--------------------------------------+--------------------------------------+
+| **Project** | Fraser/auto/auto@opnfv |
+| | |
++--------------------------------------+--------------------------------------+
+| **Repo/commit-ID** | |
+| | |
++--------------------------------------+--------------------------------------+
+| **Release designation** | Fraser 6.0 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Release date** | 2018-04-20 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Purpose of the delivery** | Official OPNFV release |
+| | |
++--------------------------------------+--------------------------------------+
+
+Version change
+^^^^^^^^^^^^^^
+
+Module version changes
+~~~~~~~~~~~~~~~~~~~~~~
+- There have been no version changes.
+
+
+Document version changes
+~~~~~~~~~~~~~~~~~~~~~~~~
+- There have been no version changes.
+
+
+Reason for version
+^^^^^^^^^^^^^^^^^^
+Feature additions
+~~~~~~~~~~~~~~~~~
+
+Initial release, with use case descriptions, release plan, and in-progress test cases and ONAP installations.
+
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| AUTO-1 | Define Auto-UC-01 Service Provider's |
+| | Management of Edge Cloud |
++--------------------------------------+--------------------------------------+
+| AUTO-2 | Define Auto-UC-02 Resilience |
+| | Improvements through ONAP |
++--------------------------------------+--------------------------------------+
+| AUTO-7 | Define Auto-UC-03 Enterprise vCPE |
+| | |
++--------------------------------------+--------------------------------------+
+| AUTO-4 | Develop test cases for Auto-UC-02 |
+| | Resilience Improvements through ONAP |
++--------------------------------------+--------------------------------------+
+| AUTO-8 | Develop test cases for Auto-UC-03 |
+| | Enterprise vCPE |
++--------------------------------------+--------------------------------------+
+
+
+Bug corrections
+~~~~~~~~~~~~~~~
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+
+Deliverables
+============
+
+Software deliverables
+^^^^^^^^^^^^^^^^^^^^^
+
+Initial release: in-progress install scripts and test case implementations.
+
+
+Documentation deliverables
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Initial versions of:
+
+* User guide `OPNFV User and Configuration Guide <http://docs.opnfv.org/en/latest/release/userguide.introduction.html>`_
+* Release notes (this document)
+
+
+
+Known Limitations, Issues and Workarounds
+=========================================
+
+System Limitations
+^^^^^^^^^^^^^^^^^^
+
+* ONAP still to be validated for Arm servers
+* DCAE still to be validated for Kubernetes
+
+
+
+Known issues
+^^^^^^^^^^^^
+
+None at this point.
+
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+
+Workarounds
+^^^^^^^^^^^
+
+None at this point.
+
+
+
+Test Result
+===========
+
+None at this point.
+
+
+
++--------------------------------------+--------------------------------------+
+| **TEST-SUITE** | **Results:** |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+
+References
+==========
+
+For more information on the OPNFV Fraser release, please see:
+http://opnfv.org/fraser
+
+Auto Wiki:
+https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
diff --git a/docs/release/release-notes/auto-proj-rn01.png b/docs/release/release-notes/auto-proj-rn01.png
new file mode 100644
index 0000000..65e4aa6
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-rn01.png
Binary files differ
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
new file mode 100644
index 0000000..7a70167
--- /dev/null
+++ b/docs/release/release-notes/index.rst
@@ -0,0 +1,15 @@
+.. _auto-releasenotes:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+===============================================
+OPNFV Auto (ONAP-Automated OPNFV) Release Notes
+===============================================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 2
+
+ Auto-release-notes.rst
diff --git a/docs/release/userguide/UC01-feature.userguide.rst b/docs/release/userguide/UC01-feature.userguide.rst
new file mode 100644
index 0000000..5cf38e1
--- /dev/null
+++ b/docs/release/userguide/UC01-feature.userguide.rst
@@ -0,0 +1,75 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+======================================
+Auto User Guide: Use Case 1 Edge Cloud
+======================================
+
+This document provides the user guide for Fraser release of Auto,
+specifically for Use Case 1: Edge Cloud.
+
+
+Description
+===========
+
+This use case aims at showcasing the benefits of using ONAP for autonomous Edge Cloud management.
+
+A high level of automation of VNF lifecycle event handling after launch is enabled by ONAP policies and closed-loop controls, which take care of most lifecycle events (start, stop, scale up/down/in/out, recovery/migration for HA) as well as their monitoring and SLA management.
+
+Multiple types of VNFs, for different execution environments, are first approved in the catalog thanks to the onboarding process, and then can be deployed and handled by multiple controllers in a systematic way.
+
+This results in management efficiency (lower control/automation overhead) and high degree of autonomy.
+
+
+Preconditions:
+
+#. hardware environment in which Edge cloud may be deployed
+#. an Edge cloud has been deployed and is ready for operation
+#. ONAP has been deployed onto a Cloud, and is interfaced (i.e. provisioned for API access) to the Edge cloud
+
+
+
+Main Success Scenarios:
+
+* lifecycle management - start, stop, scale (dependent upon telemetry)
+
+* recovering from faults (detect, determine appropriate response, act); i.e. exercise closed-loop policy engine in ONAP
+
+ * verify mechanics of control plane interaction
+
+* collection of telemetry for machine learning
+
+
+Details on the test cases corresponding to this use case:
+
+* Environment check
+
+ * Basic environment check: Create test script to check basic VIM (OpenStack), ONAP, and VNF are up and running
+
+* VNF lifecycle management
+
+ * VNF Instance Management: Validation of VNF Instance Management which includes VNF instantiation, VNF State Management and termination
+
+ * Tacker Monitoring Driver (VNFMonitorPing):
+
+ * Write Tacker Monitor driver to handle monitor_call and based on return state value create custom events
+ * If Ping to VNF fails, trigger below events
+
+ * Event 1 : Collect failure logs from VNF
+ * Event 2 : Soft restart/respawn the VNF
+
+ * Integrate with Telemetry
+
+ * Create TOSCA template policies to implement ceilometer data collection service
+ * Collect CPU utilization data, compare with threshold, and perform action accordingly (respawn, scale-in/scale-out)
+
+
+
+Test execution high-level description
+=====================================
+
+<TBC>
+
diff --git a/docs/release/userguide/UC02-feature.userguide.rst b/docs/release/userguide/UC02-feature.userguide.rst
new file mode 100644
index 0000000..0ecb7de
--- /dev/null
+++ b/docs/release/userguide/UC02-feature.userguide.rst
@@ -0,0 +1,134 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+================================================================
+Auto User Guide: Use Case 2 Resiliency Improvements Through ONAP
+================================================================
+
+This document provides the user guide for Fraser release of Auto, specifically for Use Case 2: Resiliency Improvements Through ONAP.
+
+
+Description
+===========
+
+This use case illustrates VNF failure recovery time reduction with ONAP, thanks to its automated monitoring and management. It:
+
+* simulates an underlying problem (failure, stress, or any adverse condition in the network that can impact VNFs)
+* tracks a VNF
+* measures the amount of time it takes for ONAP to restore the VNF functionality.
+
+The benefit for NFV edge service providers is to assess what degree of added VIM+NFVI platform resilience for VNFs is obtained by leveraging ONAP closed-loop control, vs. VIM+NFVI self-managed resilience (which may not be aware of the VNF or the corresponding end-to-end Service, but only of underlying resources such as VMs and servers).
+
+
+Preconditions:
+
+#. hardware environment in which Edge cloud may be deployed
+#. Edge cloud has been deployed and is ready for operation
+#. ONAP has been deployed onto a cloud and is interfaced (i.e. provisioned for API access) to the Edge cloud
+#. Components of ONAP have been deployed on the Edge cloud as necessary for specific test objectives
+
+In future releases, Auto Use cases will also include the deployment of ONAP (if not already installed), the deployment of test VNFs (pre-existing VNFs in pre-existing ONAP can be used in the test as well), the configuration of ONAP for monitoring these VNFs (policies, CLAMP, DCAE), in addition to the test scripts which simulate a problem and measures recovery time.
+
+Different types of problems can be simulated, hence the identification of multiple test cases corresponding to this use case, as illustrated in this diagram:
+
+.. image:: auto-UC02-testcases.jpg
+
+Description of simulated problems/challenges:
+
+* Physical Infra Failure
+
+ * Migration upon host failure: Compute host power is interrupted, and affected workloads are migrated to other available hosts.
+ * Migration upon disk failure: Disk volumes are unmounted, and affected workloads are migrated to other available hosts.
+ * Migration upon link failure: Traffic on links is interrupted/corrupted, and affected workloads are migrated to other available hosts.
+ * Migration upon NIC failure: NIC ports are disabled by host commands, and affected workloads are migrated to other available hosts.
+
+* Virtual Infra Failure
+
+ * OpenStack compute host service fail: Core OpenStack service processes on compute hosts are terminated, and auto-restored, or affected workloads are migrated to other available hosts.
+ * SDNC service fail: Core SDNC service processes are terminated, and auto-restored.
+ * OVS fail: OVS bridges are disabled, and affected workloads are migrated to other available hosts.
+ * etc.
+
+* Security
+
+ * Host tampering: Host tampering is detected, the host is fenced, and affected workloads are migrated to other available hosts.
+ * Host intrusion: Host intrusion attempts are detected, an offending workload, device, or flow is identified and fenced, and as needed affected workloads are migrated to other available hosts.
+ * Network intrusion: Network intrusion attempts are detected, and an offending flow is identified and fenced.
+
+
+
+
+Test execution high-level description
+=====================================
+
+The following two MSCs (Message Sequence Charts) show the actors and high-level interactions.
+
+The first MSC shows the preparation activities (assuming the hardware, network, cloud, and ONAP have already been installed): onboarding and deployment of VNFs (via ONAP portal and modules in sequence: SDC, VID, SO), and ONAP configuration (policy framework, closed-loops in CLAMP, activation of DCAE).
+
+.. image:: auto-UC02-preparation.jpg
+
+
+The second MSC illustrates the pattern of all test cases for the Resiliency Improvements:
+
+* simulate the chosen problem (a.k.a. a "Challenge") for this test case, for example suspend a VM which may be used by a VNF
+* start tracking the target VNF of this test case
+* measure the ONAP-orchestrated VNF Recovery Time
+* then the test stops simulating the problem (for example: resume the VM that was suspended).
+
+In parallel, the MSC also shows the sequence of events happening in ONAP, thanks to its configuration to provide Service Assurance for the VNF.
+
+.. image:: auto-UC02-pattern.jpg
+
+
+Test design: data model, implementation modules
+===============================================
+
+The high-level design of classes identifies several entities:
+
+* Test Case: as identified above, each is a special case of the overall use case (e.g., categorized by challenge type)
+* Test Definition: gathers all the information necessary to run a certain test case
+* Metric Definition: describes a certain metric that may be measured, in addition to Recovery Time
+* Challenge Definition: describe the challenge (problem, failure, stress, ...) simulated by the test case
+* Recipient: entity that can receive commands and send responses, and that is queried by the Test Definition or Challenge Definition (a recipient would be typically a management service, with interfaces (CLI or API) for clients to query)
+* Resources: with 3 types (VNF, cloud virtual resource such as a VM, physical resource such as a server)
+
+
+Three of these entities have execution-time corresponding classes:
+
+* Test Execution, which captures all the relevant data of the execution of a Test Definition
+* Challenge Execution, which captures all the relevant data of the execution of a Challenge Definition
+* Metric Value, which captures a quantitative measurement of a Metric Definition (with a timestamp)
+
+.. image:: auto-UC02-data1.jpg
+
+
+The following diagram illustrates an implementation-independent design of the attributes of these entities:
+
+.. image:: auto-UC02-data2.jpg
+
+
+This next diagram shows the Python classes and attributes, as implemented by this Use Case (for all test cases):
+
+.. image:: auto-UC02-data3.jpg
+
+
+Test definition data is stored in serialization files (Python pickles), while test execution data is stored in CSV files, for easier post-analysis.
+
+The module design is straightforward: functions and classes for managing data, for interfacing with recipients, for executing tests, and for interacting with the test user (choosing a Test Definition, showing the details of a Test Definition, starting the execution).
+
+.. image:: auto-UC02-module1.jpg
+
+
+This last diagram shows the test user menu functions:
+
+.. image:: auto-UC02-module2.jpg
+
+
+In future releases of Auto, testing environments such as FuncTest and Yardstick might be leveraged.
+
+Also, anonymized test results could be collected from users willing to share them, and aggregates could be
+maintained as benchmarks.
+
diff --git a/docs/release/userguide/UC03-feature.userguide.rst b/docs/release/userguide/UC03-feature.userguide.rst
new file mode 100644
index 0000000..5f28158
--- /dev/null
+++ b/docs/release/userguide/UC03-feature.userguide.rst
@@ -0,0 +1,79 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+===========================================
+Auto User Guide: Use Case 3 Enterprise vCPE
+===========================================
+
+This document provides the user guide for Fraser release of Auto,
+specifically for Use Case 3: Enterprise vCPE.
+
+
+Description
+===========
+
+This Use Case shows how ONAP can help ensure that virtual CPEs (including vFW: virtual firewalls) in Edge Cloud are enterprise-grade.
+
+ONAP operations include a verification process for VNF onboarding (i.e. inclusion in the ONAP catalog), with multiple Roles (designer, tester, governor, operator), responsible for approving proposed VNFs (as VSPs (Vendor Software Products), and eventually as end-to-end Services).
+
+This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are only chosen from such an approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet enterprise-grade requirements. ONAP provides an NBI in addition to a standard portal, thus enabling a programmatic deployment of VNFs, still conforming to ONAP processes.
+
+Moreover, ONAP also comprises real-time monitoring (by the DCAE component), which monitors performance for SLAs, can adjust allocated resources accordingly (elastic adjustment at VNF level), and can ensure High Availability.
+
+DCAE executes directives coming from policies described in the Policy Framework, and closed-loop controls described in the CLAMP component.
+
+Finally, this automated approach also reduces costs, since repetitive actions are designed once and executed multiple times, as vCPEs are instantiated and decommissioned (frequent events, given the variability of business activity, and a Small Business market similar to the Residential market: many contract updates resulting in many vCPE changes).
+
+NFV edge service providers need to provide site2site, site2dc (Data Center) and site2internet services to tenants both efficiently and safely, by deploying such qualified enterprise-grade vCPE.
+
+
+Preconditions:
+
+#. hardware environment in which Edge cloud may be deployed
+#. an Edge cloud has been deployed and is ready for operation
+#. enterprise edge devices, such as ThinCPE, have access to the Edge cloud with WAN interfaces
+#. ONAP components (MSO, SDN-C, APP-C and VNFM) have been deployed onto a cloud and are interfaced (i.e. provisioned for API access) to the Edge cloud
+
+
+Main Success Scenarios:
+
+* VNF spin-up
+
+ * vCPE spin-up: MSO calls the VNFM to spin up a vCPE instance from the catalog and then updates the active VNF list
+ * vFW spin-up: MSO calls the VNFM to spin up a vFW instance from the catalog and then updates the active VNF list
+
+* site2site
+
+ * L3VPN service subscribing: MSO calls the SDNC to create VXLAN tunnels to carry L2 traffic between client's ThinCPE and SP's vCPE, and enables vCPE to route between different sites.
+  * L3VPN service unsubscribing: MSO calls the SDNC to destroy tunnels and routes, thus disabling traffic between different sites.
+
+
+See `ONAP description of vCPE use case <https://wiki.onap.org/display/DW/Use+Case+proposal%3A+Enterprise+vCPE>`_ for more details, including MSCs.
+
+
+Details on the test cases corresponding to this use case:
+
+* VNF Management
+
+ * Spin up a vCPE instance: Spin up a vCPE instance, by calling NBI of the orchestrator.
+ * Spin up a vFW instance: Spin up a vFW instance, by calling NBI of the orchestrator.
+
+* VPN as a Service
+
+ * Subscribe to a VPN service: Subscribe to a VPN service, by calling NBI of the orchestrator.
+  * Unsubscribe from a VPN service: Unsubscribe from a VPN service, by calling NBI of the orchestrator.
+
+* Internet as a Service
+
+ * Subscribe to an Internet service: Subscribe to an Internet service, by calling NBI of the orchestrator.
+  * Unsubscribe from an Internet service: Unsubscribe from an Internet service, by calling NBI of the orchestrator.
+
+
+Test execution high-level description
+=====================================
+
+<TBC>
+
diff --git a/docs/release/userguide/auto-UC02-control-loop-flow.png b/docs/release/userguide/auto-UC02-control-loop-flow.png
new file mode 100644
index 0000000..b234ece
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-control-loop-flow.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-data1.jpg b/docs/release/userguide/auto-UC02-data1.jpg
new file mode 100644
index 0000000..02a60ba
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-data1.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-data2.jpg b/docs/release/userguide/auto-UC02-data2.jpg
new file mode 100644
index 0000000..7096c96
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-data2.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-data3.jpg b/docs/release/userguide/auto-UC02-data3.jpg
new file mode 100644
index 0000000..8e8921d
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-data3.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-module1.jpg b/docs/release/userguide/auto-UC02-module1.jpg
new file mode 100644
index 0000000..184ab95
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-module1.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-module2.jpg b/docs/release/userguide/auto-UC02-module2.jpg
new file mode 100644
index 0000000..b95f42d
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-module2.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-pattern.jpg b/docs/release/userguide/auto-UC02-pattern.jpg
new file mode 100644
index 0000000..b2c9dee
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-pattern.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-preparation.jpg b/docs/release/userguide/auto-UC02-preparation.jpg
new file mode 100644
index 0000000..e2c0ba5
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-preparation.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-testcases.jpg b/docs/release/userguide/auto-UC02-testcases.jpg
new file mode 100644
index 0000000..ccb676f
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-testcases.jpg
Binary files differ
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
new file mode 100644
index 0000000..7cfbe94
--- /dev/null
+++ b/docs/release/userguide/index.rst
@@ -0,0 +1,27 @@
+.. _auto-userguide:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+============================================
+OPNFV Auto (ONAP-Automated OPNFV) User Guide
+============================================
+
+.. The feature user guide should provide an OPNFV user with enough information to
+.. use the features provided by the feature project in the supported scenarios.
+.. This guide should walk a user through the usage of the features once a scenario
+.. has been deployed and is active according to the installation guide provided
+.. by the installer project.
+
+.. toctree::
+ :numbered:
+ :maxdepth: 2
+
+ UC01-feature.userguide.rst
+ UC02-feature.userguide.rst
+ UC03-feature.userguide.rst
+
+.. The feature.userguide.rst files should contain the text for this document
+.. additional documents can be added to this directory and added in the right order
+.. to this file as a list below.
diff --git a/lib/auto/__init__.py b/lib/auto/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/auto/__init__.py
diff --git a/lib/auto/testcase/resiliency/AutoResilGlobal.py b/lib/auto/testcase/resiliency/AutoResilGlobal.py
new file mode 100644
index 0000000..1a59f4b
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilGlobal.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: global variables (list of definition data)
+
+#docstring
+"""This module contains global variable for OPNFV Auto Test Data for Use Case 2: Resilience Improvements Through ONAP.
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+
+######################################################################
+# import statements
+
+
+# global variables
+test_case_list = None
+test_definition_list = None
+recipient_list = None
+challenge_definition_list = None
+metric_definition_list = None
+physical_resource_list = None
+cloud_virtual_resource_list = None
+VNF_Service_list = None
+
diff --git a/lib/auto/testcase/resiliency/AutoResilItfCloud.py b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
new file mode 100644
index 0000000..69c5327
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with cloud managers (OpenStack, Kubernetes, AWS, ...)
+
+
+######################################################################
+# import statements
+import AutoResilGlobal
+
+# for method 1 and 2
+#import openstack
+
+#for method 3
+from openstack import connection
+
+def os_list_servers(conn):
+ """List OpenStack servers."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/compute.html
+ if conn != None:
+ print("\nList Servers:")
+
+ try:
+ i=1
+ for server in conn.compute.servers():
+                print('Server',str(i),'\n',server,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Servers\n")
+
+
+def os_list_networks(conn):
+ """List OpenStack networks."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/network.html
+ if conn != None:
+ print("\nList Networks:")
+
+ try:
+ i=1
+ for network in conn.network.networks():
+                print('Network',str(i),'\n',network,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Networks\n")
+
+
+def os_list_volumes(conn):
+ """List OpenStack volumes."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/block_storage.html
+ # note: The block_storage member will only be added if the service is detected.
+ if conn != None:
+ print("\nList Volumes:")
+
+ try:
+ i=1
+ for volume in conn.block_storage.volumes():
+                print('Volume',str(i),'\n',volume,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Volumes\n")
+
+
+def os_list_users(conn):
+ """List OpenStack users."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
+ if conn != None:
+ print("\nList Users:")
+
+ try:
+ i=1
+ for user in conn.identity.users():
+                print('User',str(i),'\n',user,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Users\n")
+
+def os_list_projects(conn):
+ """List OpenStack projects."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
+ if conn != None:
+ print("\nList Projects:")
+
+ try:
+ i=1
+ for project in conn.identity.projects():
+                print('Project',str(i),'\n',project,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Projects\n")
+
+
+def os_list_domains(conn):
+ """List OpenStack domains."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
+ if conn != None:
+ print("\nList Domains:")
+
+ try:
+ i=1
+ for domain in conn.identity.domains():
+                print('Domain',str(i),'\n',domain,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Domains\n")
+
+
+
+
+
+
+
+def gdtest_openstack():
+ # Method 1: assume there is a clouds.yaml file in PATH, starting path search with local directory
+ #conn = openstack.connect(cloud='armopenstack', region_name='RegionOne')
+ #conn = openstack.connect(cloud='hpe16openstack', region_name='RegionOne')
+ # getting error: AttributeError: module 'openstack' has no attribute 'connect'
+
+ # Method 2: pass arguments directly, all as strings
+ # see details at https://docs.openstack.org/python-openstacksdk/latest/user/connection.html
+ # conn = openstack.connect(
+ # auth_url='https://10.10.50.103:5000/v2.0',
+ # project_name='admin',
+ # username='admin',
+ # password='opnfv_secret',
+ # region_name='RegionOne',
+ # )
+ # conn = openstack.connect(
+ # auth_url='http://10.16.0.101:5000/v2.0',
+ # project_name='admin',
+ # username='admin',
+ # password='opnfv_secret',
+ # region_name='RegionOne',
+ # )
+ # getting error: AttributeError: module 'openstack' has no attribute 'connect'
+
+ # Method 3: create Connection object directly
+ auth_args = {
+ #'auth_url': 'https://10.10.50.103:5000/v2.0', # Arm
+ #'auth_url': 'http://10.16.0.101:5000/v2.0', # hpe16, Euphrates
+ 'auth_url': 'http://10.16.0.107:5000/v3', # hpe16, Fraser
+ 'project_name': 'admin',
+ 'username': 'admin',
+ 'password': 'opnfv_secret',
+ 'region_name': 'RegionOne',
+ 'domain': 'Default'}
+ conn = connection.Connection(**auth_args)
+
+ #conn = connection.Connection(
+ #auth_url='http://10.16.0.107:5000/v3',
+ #project_name='admin',
+ #username='admin',
+ #password='opnfv_secret')
+
+
+ os_list_servers(conn)
+ os_list_networks(conn)
+ os_list_volumes(conn)
+ os_list_users(conn)
+ os_list_projects(conn)
+ os_list_domains(conn)
+
+
+ # get_server(server): Get a single Server
+ # Parameters: server – The value can be the ID of a server or a Server instance.
+ # conn.compute.get_server(server)
+
+ # suspend_server(server): Suspends a server and changes its status to SUSPENDED.
+ # Parameters: server – Either the ID of a server or a Server instance.
+ # conn.compute.suspend_server(server)
+
+ # resume_server(server): Resumes a suspended server and changes its status to ACTIVE.
+ # Parameters: server – Either the ID of a server or a Server instance.
+ # conn.compute.resume_server(server)
+
+
+def main():
+
+ print("\nTest Auto Cloud Interface")
+
+ gdtest_openstack()
+
+ print("Ciao\n")
+
+if __name__ == "__main__":
+ main()
+
+
+# OpenStack HTTP API: https://developer.openstack.org/api-ref/compute/
+#{your_compute_service_url}/servers/{server_id}/action
+#GET
+#http://mycompute.pvt/compute/v2.1/servers/{server_id}/suspend
+#http://mycompute.pvt/compute/v2.1/servers/{server_id}/resume
+# but better use the python unified client
+
+
diff --git a/lib/auto/testcase/resiliency/AutoResilItfOS.py b/lib/auto/testcase/resiliency/AutoResilItfOS.py
new file mode 100644
index 0000000..5f792f6
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilItfOS.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with OS, or servers
+
+
+######################################################################
+# import statements
+import AutoResilGlobal
+
+
+def f1():
+ return 0
+
+
+
+
diff --git a/lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py b/lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py
new file mode 100644
index 0000000..494d0ab
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with VNF/NVF managers (focus on ONAP)
+# entities that manage VNFs and orchestrates services (VNF-M and NFV-O)
+
+######################################################################
+# import statements
+import AutoResilGlobal
+
+def f1():
+ return 0
+
+
+
+
diff --git a/lib/auto/testcase/resiliency/AutoResilMain.py b/lib/auto/testcase/resiliency/AutoResilMain.py
new file mode 100644
index 0000000..2f67bdf
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilMain.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: main program
+# data initialization
+# interactive CLI user menu:
+# 1) select a test definition to run
+# 2) view definition of selected test (pull all data from definition files)
+# 3) start test
+# 4) exit
+
+
+#docstring
+"""This is the main module for OPNFV Auto Test Data for Use Case 2: Resilience Improvements Through ONAP.
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+
+
+
+######################################################################
+# import statements
+import AutoResilGlobal
+from AutoResilMgTestDef import *
+
+# Constants
+PROJECT_NAME = "Auto"
+USE_CASE_NAME = "Resilience Improvements Through ONAP"
+
+
+
+######################################################################
+
+def show_menu(selected_test_def_ID):
+ """Show menu, with a different first line based on current Test Definition selection."""
+
+ if selected_test_def_ID>0 :
+ print("\nCurrently selected test Definition ID: ",selected_test_def_ID)
+ else:
+ print("\nCurrently selected test Definition ID: (none)")
+ print("1: select Test Definition ID")
+ print("2: view currently selected Test Definition details")
+ print("3: start an execution of currently selected Test Definition")
+ print("4: exit")
+
+
+def get_menu_choice():
+ """Get a user input (a menu entry number)."""
+ while True:
+ try:
+ user_choice = int(input(" Choice: "))
+ except ValueError:
+ print(" Invalid choice (must be an integer). Try again.")
+ continue
+ if user_choice < 1 or user_choice > 4:
+ print(" Invalid choice (must be between 1 and 4). Try again.")
+ continue
+ else:
+ return user_choice
+
+
+def get_test_def_ID():
+ """Get a user input (a test definition ID)."""
+ while True:
+ try:
+ user_test_def_ID = int(input(" Test Definition ID: "))
+ except ValueError:
+ print(" Invalid choice (must be an integer). Try again.")
+ continue
+ if user_test_def_ID <1:
+ print(" Invalid choice (must be a positive integer). Try again.")
+ continue
+
+ test_defs = read_list_bin(FILE_TEST_DEFINITIONS)
+ if (test_defs == None) or (test_defs==[]):
+ print("Problem with test definition file: empty")
+ sys.exit() # stop entire program, because test definition file MUST be correct
+
+ if index_already_there(user_test_def_ID, test_defs):
+ return user_test_def_ID
+ else:
+ print("Invalid choice (Test Definition ID",user_test_def_ID,"does not exist). Try again.")
+ continue
+
+
+
+######################################################################
+def main():
+
+ print("\nProject:\t", PROJECT_NAME)
+ print("Use Case:\t",USE_CASE_NAME)
+
+
+ # Run initializations, to refresh data and make sure files are here. Also, this loads the lists in memory.
+ # For now, initialization functions are self-contained and hard-coded:
+ # all definition data is initialized from the code, not from user interaction.
+ AutoResilGlobal.test_case_list = init_test_cases()
+ AutoResilGlobal.test_definition_list = init_test_definitions()
+ AutoResilGlobal.recipient_list = init_recipients()
+ AutoResilGlobal.challenge_definition_list = init_challenge_definitions()
+ AutoResilGlobal.metric_definition_list = init_metric_definitions()
+
+ AutoResilGlobal.physical_resource_list = init_physical_resources()
+ AutoResilGlobal.cloud_virtual_resource_list = init_cloud_virtual_resources()
+ AutoResilGlobal.VNF_Service_list = init_VNFs_Services()
+
+
+ # start with no test definition selected
+ selected_test_def_ID = -1
+
+ # interactive menu loop
+ while True:
+
+ show_menu(selected_test_def_ID)
+ user_choice = get_menu_choice()
+ #print("***user_choice:",user_choice) #debug
+
+ if user_choice == 1: # select Test Definition ID
+ selected_test_def_ID = get_test_def_ID()
+ selected_test_def = get_indexed_item_from_list(selected_test_def_ID, AutoResilGlobal.test_definition_list)
+ continue
+
+ if user_choice == 2: # view currently selected Test Definition details
+ if selected_test_def_ID > 0:
+ if selected_test_def == None:
+ print("Problem with test definition: empty")
+ sys.exit() # stop entire program, because test definition MUST be correct
+ else:
+ selected_test_def.printout_all(0)
+ continue
+ else:
+ print("No current selection of Test Definition. Try again.")
+ continue
+
+ if user_choice == 3: # start an execution of currently selected Test Definition
+ if selected_test_def_ID > 0:
+ if selected_test_def == None:
+ print("Problem with test definition: empty")
+ sys.exit() # stop entire program, because test definition MUST be correct
+ else:
+ # TODO run test: call selected test definition run_test_code() method
+ test_def = get_indexed_item_from_list(selected_test_def_ID, AutoResilGlobal.test_definition_list)
+ if test_def != None:
+ test_def.run_test_code()
+
+ else:
+ print("No current selection of Test Definition. Try again.")
+ continue
+
+ if user_choice == 4: # exit
+ print("\nEnd of Main Program")
+ print("\nProject:\t", PROJECT_NAME)
+ print("Use Case:\t",USE_CASE_NAME)
+ print("\nBye!\n")
+ sys.exit()
+
+
+
+
+if __name__ == "__main__":
+ main()
+
diff --git a/lib/auto/testcase/resiliency/AutoResilMgTestDef.py b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
new file mode 100644
index 0000000..9667f93
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
@@ -0,0 +1,1723 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: management of test definitions
+
+# Functions and classes to manage and initialize test data relative to:
+# physical resources
+# cloud resources
+# VNFs
+# recipients (OS, cloud/VNF managers)
+# challenge definitions
+# optional metrics
+# test definitions
+# Storage of definition data in binary files (pickle), and test data results in .CSV files
+
+
+#docstring
+"""This module contains functions and classes to manage OPNFV Auto Test Data for Use Case 2: Resilience Improvements Through ONAP.
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+
+######################################################################
+# import statements
+import pickle
+import csv
+import sys
+from enum import Enum
+from datetime import datetime, timedelta
+import AutoResilGlobal
+#import openstack
+
+# Constants with definition file names
+# (each is a pickle binary file persisting one list of definition objects)
+FILE_PHYSICAL_RESOURCES = "ResourcesPhysical.bin"
+FILE_CLOUD_RESOURCES = "ResourcesCloud.bin"
+FILE_VNFS_SERVICES = "ResourcesVNFServices.bin"
+FILE_RECIPIENTS = "Recipients.bin"
+FILE_TEST_CASES = "TestCases.bin"
+FILE_METRIC_DEFINITIONS = "DefinitionsMetrics.bin"
+FILE_CHALLENGE_DEFINITIONS = "DefinitionsChallenges.bin"
+FILE_TEST_DEFINITIONS = "DefinitionsTests.bin"
+
+# Other constants
+# number of spaces per indentation level used by the printout_all() methods
+INDENTATION_MULTIPLIER = 4
+
+
+######################################################################
+
+def read_list_bin(file_name):
+ """Generic function to extract a list from a binary file."""
+ try:
+ extracted_list = []
+ with open(file_name, "rb") as binary_file:
+ extracted_list = pickle.load(binary_file)
+ return extracted_list
+ except FileNotFoundError:
+ print("File not found: ",file_name)
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+
+def write_list_bin(inserted_list, file_name):
+    """Generic function to write a list to a binary file (replace content).
+
+    Pickles inserted_list into file_name, truncating any previous content.
+    Any error aborts the whole program, because persisted definition data
+    MUST be written correctly.
+    """
+    try:
+        with open(file_name, "wb") as binary_file:
+            pickle.dump(inserted_list, binary_file)
+    except Exception as e:
+        print(type(e), e)
+        sys.exit()
+
+
+class AutoBaseObject:
+ """Base class for Auto project, with common attributes (ID, name)."""
+ def __init__ (self, param_ID, param_name):
+ self.ID = param_ID
+ self.name = param_name
+ # for display
+ def __repr__(self):
+ return ("ID="+str(self.ID)+" name="+self.name)
+ # for print
+ def __str__(self):
+ return ("ID="+str(self.ID)+" name="+self.name)
+
+
+def index_already_there(index, given_list):
+    """Generic function to check if an index already exists in a list of AutoBaseObject.
+
+    Returns True if some item in given_list has item.ID == index, else False.
+    NOTE: if any list element is not an AutoBaseObject, the whole program is
+    aborted (the list is considered corrupted).
+    """
+
+    # check if ID already exists
+    already_there = False
+    if len(given_list)>0:
+        for item in given_list:
+            if isinstance(item, AutoBaseObject):
+                if item.ID == index:
+                    already_there = True
+                    break
+            else:
+                print("Issue with list: item is not AutoBaseObject")
+                print(" index=\n",index)
+                sys.exit()
+    return already_there
+
+
+def get_indexed_item_from_list(index, given_list):
+    """Generic function to get an indexed entry from a list of AutoBaseObject.
+
+    Returns the first item with item.ID == index, or None if no such item.
+    NOTE: if any list element is not an AutoBaseObject, the whole program is
+    aborted (the list is considered corrupted).
+    """
+
+    returned_item = None
+
+    if len(given_list)>0:
+        for item in given_list:
+            if isinstance(item, AutoBaseObject):
+                if item.ID == index:
+                    returned_item = item
+                    break
+            else:
+                print("Issue with list: item is not AutoBaseObject")
+                print(" index=\n",index)
+                sys.exit()
+    return returned_item
+
+
+def get_indexed_item_from_file(index, file_name):
+ """Generic function to get an indexed entry from a list of AutoBaseObject stored in a binary file."""
+
+ list_in_file = read_list_bin(file_name)
+ return get_indexed_item_from_list(index, list_in_file)
+
+
+
+######################################################################
+
+class TestCase(AutoBaseObject):
+    """Test Case class for Auto project.
+
+    A test case is an identified scenario (ID, name) plus a link to its
+    Auto JIRA ticket; actual executable logic lives in TestDefinition.
+    """
+    def __init__ (self, test_case_ID, test_case_name,
+                  test_case_JIRA_URL):
+
+        # superclass constructor
+        AutoBaseObject.__init__(self, test_case_ID, test_case_name)
+
+        # specifics for this subclass
+
+        # Auto JIRA link
+        self.JIRA_URL = test_case_JIRA_URL
+
+    def printout_all(self, indent_level):
+        """Print out all attributes, with an indentation level."""
+        indent = " "*indent_level*INDENTATION_MULTIPLIER
+
+        print(indent, "Test Case ID:", self.ID, sep='')
+        print(indent, "|-name:", self.name, sep='')
+
+        print(indent, "|-JIRA URL:", self.JIRA_URL, sep='')
+
+
+# no need for functions to remove data: ever-growing library, arbitrary ID
+# initial version: should not even add data dynamically, in case object signature changes
+# better stick to initialization functions only to fill data, unless 100% sure signature does not change
+def add_test_case_to_file(test_case_ID, test_case_name, test_case_JIRA_URL):
+ """Function to add persistent data about test cases (in binary file)."""
+
+ test_cases = read_list_bin(FILE_TEST_CASES)
+
+ if index_already_there(test_case_ID, test_cases):
+ print("Test Case ID=",test_case_ID," is already defined and can't be added")
+ else:
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+ write_list_bin(test_cases, FILE_TEST_CASES)
+
+ return test_cases
+
+
+
+def init_test_cases():
+ """Function to initialize test case data."""
+ test_cases = []
+
+ # add info to list in memory, one by one, following signature values
+ test_case_ID = 1
+ test_case_name = "auto-resiliency-pif-001"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-9"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 2
+ test_case_name = "auto-resiliency-pif-002"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-10"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 3
+ test_case_name = "auto-resiliency-pif-003"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-11"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 4
+ test_case_name = "auto-resiliency-pif-004"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-12"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 5
+ test_case_name = "auto-resiliency-vif-001"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-13"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 6
+ test_case_name = "auto-resiliency-vif-002"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-14"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 7
+ test_case_name = "auto-resiliency-vif-003"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-15"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 8
+ test_case_name = "auto-resiliency-sec-001"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-16"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 9
+ test_case_name = "auto-resiliency-sec-002"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-17"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 10
+ test_case_name = "auto-resiliency-sec-003"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-18"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ # write list to binary file
+ write_list_bin(test_cases, FILE_TEST_CASES)
+
+ return test_cases
+
+
+######################################################################
+
+class TestDefinition(AutoBaseObject):
+    """Test Definition class for Auto project.
+
+    Binds one Test Case to one Challenge Definition, the impacted VNFs,
+    metrics and recipients (all referenced by ID into the global lists in
+    AutoResilGlobal), the CLI/API commands to send, and one selected test
+    code (a method of this class, chosen by 1-based test_code_ID).
+    """
+    def __init__ (self, test_def_ID, test_def_name,
+                  test_def_challengeDefID,
+                  test_def_testCaseID,
+                  test_def_VNFIDs,
+                  test_def_associatedMetricsIDs,
+                  test_def_recipientIDs,
+                  test_def_testCLICommandSent,
+                  test_def_testAPICommandSent,
+                  test_def_codeID):
+
+        # superclass constructor
+        AutoBaseObject.__init__(self, test_def_ID, test_def_name)
+
+        # specifics for this subclass
+
+        # associated Challenge Definition (ID)
+        self.challenge_def_ID = test_def_challengeDefID
+        # associated Test Case (ID)
+        self.test_case_ID = test_def_testCaseID
+        # associated VNFs (list of IDs)
+        self.VNF_ID_list = test_def_VNFIDs
+        # associated Metrics (list of IDs)
+        self.associated_metrics_ID_list = test_def_associatedMetricsIDs
+        # associated Recipients (list of IDs)
+        self.recipient_ID_list = test_def_recipientIDs
+        # associated test CLI commands to Recipients (list of strings)
+        self.test_CLI_command_sent_list = test_def_testCLICommandSent
+        # associated test API commands to Recipients (list of data objects)
+        self.test_API_command_sent_list = test_def_testAPICommandSent
+
+        # constant for total number of test codes (one of them is used per TestDefinition instance); would be 1 per test case
+        self.TOTAL_NUMBER_OF_TEST_CODES = 10
+        # chosen test code ID (the ID is an index in a list of method names) for this instance; convention: [1;N]; in list, index is [0;N-1]
+        # a test code could use for instance Python clients (for OpenStack, Kubernetes, etc.), or HTTP APIs, or some of the CLI/API commands
+        try:
+            if 1 <= test_def_codeID <= self.TOTAL_NUMBER_OF_TEST_CODES:
+                self.test_code_ID = test_def_codeID
+            else:
+                print("TestDefinition constructor: incorrect test_def_codeID=",test_def_codeID)
+                sys.exit() # stop entire program, because test definition MUST be correct
+        except Exception as e:
+            print(type(e), e)
+            sys.exit() # stop entire program, because code ID MUST be correct
+
+        self.test_code_list = [] # list of method names; leave as per-object method (i.e. not as class methods or as static methods)
+        # add one by one, for easier later additions of new methods
+        # NOTE: list position i must correspond to test_code_ID i+1
+        self.test_code_list.append(self.test_code001)
+        self.test_code_list.append(self.test_code002)
+        self.test_code_list.append(self.test_code003)
+        self.test_code_list.append(self.test_code004)
+        self.test_code_list.append(self.test_code005)
+        self.test_code_list.append(self.test_code006)
+        self.test_code_list.append(self.test_code007)
+        self.test_code_list.append(self.test_code008)
+        self.test_code_list.append(self.test_code009)
+        self.test_code_list.append(self.test_code010)
+
+
+    def run_test_code(self):
+        """Run currently selected test code.
+
+        Dispatches via the 1-based test_code_ID into the 0-based
+        test_code_list; any error aborts the whole program.
+        """
+        try:
+            test_code_index = self.test_code_ID - 1 # lists are indexed from 0 to N-1
+            self.test_code_list[test_code_index]() # invoke corresponding method, via index
+        except Exception as e:
+            print(type(e), e)
+            sys.exit()
+
+
+    # library of test codes, probably 1 per test case, so test_case_ID would be the same as test_code_ID
+    def test_code001(self):
+        """Test case code number 001."""
+        print("This is test_code001 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+    def test_code002(self):
+        """Test case code number 002."""
+        print("This is test_code002 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+    def test_code003(self):
+        """Test case code number 003."""
+        print("This is test_code003 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+    def test_code004(self):
+        """Test case code number 004."""
+        print("This is test_code004 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+    def test_code005(self):
+        """Test case code number 005."""
+        print("This is test_code005 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+        # here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,
+        # monitoring of VNF, trigger stop code from challenge def, perform restoration of VNF
+        # NOTE(review): currently only start then stop are invoked back-to-back;
+        # the measurement/monitoring steps above are still TODO
+        challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)
+        if challenge_def != None:
+            challenge_def.run_start_challenge_code()
+            challenge_def.run_stop_challenge_code()
+
+
+    def test_code006(self):
+        """Test case code number 006."""
+        print("This is test_code006 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+    def test_code007(self):
+        """Test case code number 007."""
+        print("This is test_code007 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+    def test_code008(self):
+        """Test case code number 008."""
+        print("This is test_code008 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+    def test_code009(self):
+        """Test case code number 009."""
+        print("This is test_code009 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+    def test_code010(self):
+        """Test case code number 010."""
+        print("This is test_code010 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+
+    def printout_all(self, indent_level):
+        """Print out all attributes, with an indentation level.
+
+        Resolves every referenced ID through the AutoResilGlobal lists and
+        recursively prints the resolved objects one level deeper.
+        """
+        indent = " "*indent_level*INDENTATION_MULTIPLIER
+
+        print(indent, "\nTest Definition ID:", self.ID, sep='')
+        print(indent, "|-name:", self.name, sep='')
+
+        print(indent, "|-associated test case ID:", self.test_case_ID, sep='')
+        test_case = get_indexed_item_from_list(self.test_case_ID, AutoResilGlobal.test_case_list)
+        if test_case != None:
+            test_case.printout_all(indent_level+1)
+
+        print(indent, "|-test code ID:", self.test_code_ID, sep='')
+
+        print(indent, "|-associated challenge def ID:", self.challenge_def_ID, sep='')
+        challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)
+        if challenge_def != None:
+            challenge_def.printout_all(indent_level+1)
+
+        if self.VNF_ID_list != None:
+            if len(self.VNF_ID_list) >0:
+                print(indent, "|-associated VNFs:", sep='')
+                for VNF_ID in self.VNF_ID_list:
+                    VNF_item = get_indexed_item_from_list(VNF_ID, AutoResilGlobal.VNF_Service_list)
+                    if VNF_item != None:
+                        VNF_item.printout_all(indent_level+1)
+
+        if self.associated_metrics_ID_list != None:
+            if len(self.associated_metrics_ID_list) >0:
+                print(indent, "|-associated metrics:", sep='')
+                for Metric_ID in self.associated_metrics_ID_list:
+                    Metric_item = get_indexed_item_from_list(Metric_ID, AutoResilGlobal.metric_definition_list)
+                    if Metric_item != None:
+                        Metric_item.printout_all(indent_level+1)
+
+        if self.recipient_ID_list != None:
+            if len(self.recipient_ID_list) >0:
+                print(indent, "|-associated recipients:", sep='')
+                for recipient_ID in self.recipient_ID_list:
+                    recipient_item = get_indexed_item_from_list(recipient_ID, AutoResilGlobal.recipient_list)
+                    if recipient_item != None:
+                        recipient_item.printout_all(indent_level+1)
+
+        if self.test_CLI_command_sent_list != None:
+            if len(self.test_CLI_command_sent_list) >0:
+                print(indent, "|-associated CLI commands:", sep='')
+                for CLI_command in self.test_CLI_command_sent_list:
+                    print(" "*INDENTATION_MULTIPLIER, "|- ", CLI_command, sep='')
+
+        # TODO: self.test_API_command_sent_list (depends how API commands are stored: likely a list of strings)
+
+
+
+def init_test_definitions():
+    """Function to initialize test definition data.
+
+    Builds the list of test definitions (currently a single one: VM
+    failure impact on vFW), persists it to FILE_TEST_DEFINITIONS, and
+    returns the list. All *IDs values are references into the other
+    definition lists (challenge defs, test cases, VNFs, metrics, recipients).
+    """
+    test_definitions = []
+
+    # add info to list in memory, one by one, following signature values
+    test_def_ID = 1
+    test_def_name = "VM failure impact on virtual firewall (vFW VNF)"
+    test_def_challengeDefID = 1
+    test_def_testCaseID = 5
+    test_def_VNFIDs = [1]
+    test_def_associatedMetricsIDs = [2]
+    test_def_recipientIDs = [2]
+    test_def_testCLICommandSent = ["pwd","kubectl describe pods --include-uninitialized=false"]
+    test_def_testAPICommandSent = ["data1","data2"]
+    # selects TestDefinition.test_code005 (1-based index into test_code_list)
+    test_def_testCodeID = 5
+    test_definitions.append(TestDefinition(test_def_ID, test_def_name,
+                                           test_def_challengeDefID,
+                                           test_def_testCaseID,
+                                           test_def_VNFIDs,
+                                           test_def_associatedMetricsIDs,
+                                           test_def_recipientIDs,
+                                           test_def_testCLICommandSent,
+                                           test_def_testAPICommandSent,
+                                           test_def_testCodeID))
+
+    # write list to binary file
+    write_list_bin(test_definitions, FILE_TEST_DEFINITIONS)
+
+    return test_definitions
+
+
+######################################################################
+
+class ChallengeType(Enum):
+    """Enumeration of challenge categories; values are grouped by hundreds
+    (1xx server-level, 2xx network-level, 3xx security) to leave room for
+    additions within each family."""
+    # server-level failures
+    COMPUTE_HOST_FAILURE = 100
+    DISK_FAILURE = 101
+    LINK_FAILURE = 102
+    NIC_FAILURE = 103
+    # network-level failures
+    OVS_BRIDGE_FAILURE = 200
+    # security stresses
+    HOST_TAMPERING = 300
+    HOST_INTRUSION = 301
+    NETWORK_INTRUSION = 302
+
+
+class ChallengeDefinition(AutoBaseObject):
+    """Challenge Definition class for Auto project.
+
+    Describes one fault/stress to inject: its type, the recipient used to
+    start/stop it, the impacted cloud and physical resources (by ID), the
+    CLI/API commands involved, and one selected start/stop challenge-code
+    pair (methods of this class, chosen by 1-based challenge_code_ID).
+    """
+    def __init__ (self, chall_def_ID, chall_def_name,
+                  chall_def_challengeType,
+                  chall_def_recipientID,
+                  chall_def_impactedCloudResourcesInfo,
+                  chall_def_impactedCloudResourceIDs,
+                  chall_def_impactedPhysResourcesInfo,
+                  chall_def_impactedPhysResourceIDs,
+                  chall_def_startChallengeCLICommandSent,
+                  chall_def_stopChallengeCLICommandSent,
+                  chall_def_startChallengeAPICommandSent,
+                  chall_def_stopChallengeAPICommandSent,
+                  chall_def_codeID):
+
+        # superclass constructor
+        AutoBaseObject.__init__(self, chall_def_ID, chall_def_name)
+
+        # specifics for this subclass
+
+        # info about challenge type, categorization
+        self.challenge_type = chall_def_challengeType
+        # recipient instance, to start/stop the challenge
+        self.recipient_ID = chall_def_recipientID
+
+        # free-form info about cloud virtual impacted resource(s)
+        self.impacted_cloud_resources_info = chall_def_impactedCloudResourcesInfo
+        # impacted resources (list of IDs, usually only 1)
+        self.impacted_cloud_resource_ID_list = chall_def_impactedCloudResourceIDs
+
+        # free-form info about physical impacted resource(s)
+        self.impacted_phys_resources_info = chall_def_impactedPhysResourcesInfo
+        # impacted resources (list of IDs, usually only 1)
+        self.impacted_phys_resource_ID_list = chall_def_impactedPhysResourceIDs
+
+        # if CLI; can include hard-coded references to resources
+        self.start_challenge_CLI_command_sent = chall_def_startChallengeCLICommandSent
+        # if CLI; to restore to normal
+        self.stop_challenge_CLI_command_sent = chall_def_stopChallengeCLICommandSent
+        # if API; can include hard-coded references to resources
+        self.start_challenge_API_command_sent = chall_def_startChallengeAPICommandSent
+        # if API; to restore to normal
+        self.stop_challenge_API_command_sent = chall_def_stopChallengeAPICommandSent
+
+        # constant for total number of challenge codes (one of them is used per ChallengeDefinition instance);
+        # may be 1 per test case, maybe not (common challenges, could be re-used across test definitions and test cases)
+        # start and stop challenges are strictly linked: exactly 1 Stop challenge for each Start challenge, so same ID for Start and for Stop
+        self.TOTAL_NUMBER_OF_CHALLENGE_CODES = 10
+
+        # chosen start/stop challenge code ID (the ID is an index in a list of method names) for this instance;
+        # convention: [1;N]; in list, index is [0;N-1]
+        # a challenge code could use for instance Python clients (for OpenStack, Kubernetes, etc.), or HTTP APIs, or some of the CLI/API commands
+        try:
+            if 1 <= chall_def_codeID <= self.TOTAL_NUMBER_OF_CHALLENGE_CODES:
+                self.challenge_code_ID = chall_def_codeID
+            else:
+                print("ChallengeDefinition constructor: incorrect chall_def_codeID=",chall_def_codeID)
+                sys.exit() # stop entire program, because code ID MUST be correct
+        except Exception as e:
+            print(type(e), e)
+            sys.exit() # stop entire program, because code ID MUST be correct
+
+        # list of method names; leave as per-object method (i.e. not as class methods or as static methods)
+        self.start_challenge_code_list = []
+        self.stop_challenge_code_list = []
+        # add one by one, for easier later additions of new methods; MUST be same index for Start and for Stop
+        self.start_challenge_code_list.append(self.start_challenge_code001)
+        self.stop_challenge_code_list.append(self.stop_challenge_code001)
+        self.start_challenge_code_list.append(self.start_challenge_code002)
+        self.stop_challenge_code_list.append(self.stop_challenge_code002)
+        self.start_challenge_code_list.append(self.start_challenge_code003)
+        self.stop_challenge_code_list.append(self.stop_challenge_code003)
+        self.start_challenge_code_list.append(self.start_challenge_code004)
+        self.stop_challenge_code_list.append(self.stop_challenge_code004)
+        self.start_challenge_code_list.append(self.start_challenge_code005)
+        self.stop_challenge_code_list.append(self.stop_challenge_code005)
+        self.start_challenge_code_list.append(self.start_challenge_code006)
+        self.stop_challenge_code_list.append(self.stop_challenge_code006)
+        self.start_challenge_code_list.append(self.start_challenge_code007)
+        self.stop_challenge_code_list.append(self.stop_challenge_code007)
+        self.start_challenge_code_list.append(self.start_challenge_code008)
+        self.stop_challenge_code_list.append(self.stop_challenge_code008)
+        self.start_challenge_code_list.append(self.start_challenge_code009)
+        self.stop_challenge_code_list.append(self.stop_challenge_code009)
+        self.start_challenge_code_list.append(self.start_challenge_code010)
+        self.stop_challenge_code_list.append(self.stop_challenge_code010)
+
+
+    def run_start_challenge_code(self):
+        """Run currently selected challenge code, start portion.
+
+        Dispatches via the 1-based challenge_code_ID into the 0-based
+        start list; any error aborts the whole program.
+        """
+        try:
+            code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
+            self.start_challenge_code_list[code_index]() # invoke corresponding start method, via index
+        except Exception as e:
+            print(type(e), e)
+            sys.exit()
+
+    def run_stop_challenge_code(self):
+        """Run currently selected challenge code, stop portion.
+
+        Same challenge_code_ID as the start portion, by convention.
+        """
+        try:
+            code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
+            self.stop_challenge_code_list[code_index]() # invoke corresponding stop method, via index
+        except Exception as e:
+            print(type(e), e)
+            sys.exit()
+
+
+
+    # library of challenge codes; currently placeholder printouts only
+    def start_challenge_code001(self):
+        """Start Challenge code number 001."""
+        print("This is start_challenge_code001 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code001(self):
+        """Stop Challenge code number 001."""
+        print("This is stop_challenge_code001 from ChallengeDefinition #",self.ID, sep='')
+
+    def start_challenge_code002(self):
+        """Start Challenge code number 002."""
+        print("This is start_challenge_code002 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code002(self):
+        """Stop Challenge code number 002."""
+        print("This is stop_challenge_code002 from ChallengeDefinition #",self.ID, sep='')
+
+    def start_challenge_code003(self):
+        """Start Challenge code number 003."""
+        print("This is start_challenge_code003 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code003(self):
+        """Stop Challenge code number 003."""
+        print("This is stop_challenge_code003 from ChallengeDefinition #",self.ID, sep='')
+
+    def start_challenge_code004(self):
+        """Start Challenge code number 004."""
+        print("This is start_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code004(self):
+        """Stop Challenge code number 004."""
+        print("This is stop_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
+
+    def start_challenge_code005(self):
+        """Start Challenge code number 005."""
+        print("This is start_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code005(self):
+        """Stop Challenge code number 005."""
+        print("This is stop_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
+
+    def start_challenge_code006(self):
+        """Start Challenge code number 006."""
+        print("This is start_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code006(self):
+        """Stop Challenge code number 006."""
+        print("This is stop_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
+
+    def start_challenge_code007(self):
+        """Start Challenge code number 007."""
+        print("This is start_challenge_code007 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code007(self):
+        """Stop Challenge code number 007."""
+        print("This is stop_challenge_code007 from ChallengeDefinition #",self.ID, sep='')
+
+    def start_challenge_code008(self):
+        """Start Challenge code number 008."""
+        print("This is start_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code008(self):
+        """Stop Challenge code number 008."""
+        print("This is stop_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
+
+    def start_challenge_code009(self):
+        """Start Challenge code number 009."""
+        print("This is start_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code009(self):
+        """Stop Challenge code number 009."""
+        print("This is stop_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
+
+    def start_challenge_code010(self):
+        """Start Challenge code number 010."""
+        print("This is start_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
+    def stop_challenge_code010(self):
+        """Stop Challenge code number 010."""
+        print("This is stop_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
+
+
+
+    def printout_all(self, indent_level):
+        """Print out all attributes, with an indentation level.
+
+        Resolves referenced recipient/resource IDs through the
+        AutoResilGlobal lists and prints them one level deeper.
+        """
+        indent = " "*indent_level*INDENTATION_MULTIPLIER
+
+        print(indent, "Challenge Definition ID:", self.ID, sep='')
+        print(indent, "|-name:", self.name, sep='')
+
+        print(indent, "|-challenge type:", self.challenge_type, sep='')
+
+        print(indent, "|-challenge code ID:", self.challenge_code_ID, sep='')
+
+        print(indent, "|-associated recipient ID:", self.recipient_ID, sep='')
+        recipient = get_indexed_item_from_list(self.recipient_ID, AutoResilGlobal.recipient_list)
+        if recipient != None:
+            recipient.printout_all(indent_level+1)
+
+        print(indent, "|-info about cloud virtual impacted resource(s):", self.impacted_cloud_resources_info, sep='')
+
+        if self.impacted_cloud_resource_ID_list != None:
+            if len(self.impacted_cloud_resource_ID_list) >0:
+                print(indent, "|-associated cloud virtual impacted resource(s):", sep='')
+                for cloud_resource_ID in self.impacted_cloud_resource_ID_list:
+                    cloud_resource_item = get_indexed_item_from_list(cloud_resource_ID, AutoResilGlobal.cloud_virtual_resource_list)
+                    if cloud_resource_item != None:
+                        cloud_resource_item.printout_all(indent_level+1)
+
+        print(indent, "|-info about physical virtual impacted resource(s):", self.impacted_phys_resources_info, sep='')
+
+        if self.impacted_phys_resource_ID_list != None:
+            if len(self.impacted_phys_resource_ID_list) >0:
+                print(indent, "|-associated physical impacted resource(s):", sep='')
+                for phys_resource_ID in self.impacted_phys_resource_ID_list:
+                    phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)
+                    if phys_resource_item != None:
+                        phys_resource_item.printout_all(indent_level+1)
+
+        print(indent, "|-CLI command to start challenge:", self.start_challenge_CLI_command_sent, sep='')
+
+        print(indent, "|-CLI command to stop challenge:", self.stop_challenge_CLI_command_sent, sep='')
+
+        # TODO: self.start_challenge_API_command_sent (depends how API commands are stored: likely a list of strings)
+        # TODO: self.stop_challenge_API_command_sent (depends how API commands are stored: likely a list of strings)
+
+
+
+
+def init_challenge_definitions():
+    """Function to initialize challenge definition data.
+
+    Builds the list of challenge definitions (currently a single VM
+    failure challenge), persists it to FILE_CHALLENGE_DEFINITIONS, and
+    returns the list. Recipient/resource IDs reference the other
+    definition lists.
+    """
+    challenge_defs = []
+
+    # add info to list in memory, one by one, following signature values
+    chall_def_ID = 1
+    chall_def_name = "VM failure"
+    chall_def_challengeType = ChallengeType.COMPUTE_HOST_FAILURE
+    chall_def_recipientID = 1
+    chall_def_impactedCloudResourcesInfo = "OpenStack VM on ctl02 in Arm pod"
+    chall_def_impactedCloudResourceIDs = [2]
+    chall_def_impactedPhysResourcesInfo = "physical server XYZ"
+    chall_def_impactedPhysResourceIDs = [1]
+    chall_def_startChallengeCLICommandSent = "service nova-compute stop"
+    chall_def_stopChallengeCLICommandSent = "service nova-compute restart"
+    # OpenStack VM Suspend vs. Pause: suspend stores the state of VM on disk while pause stores it in memory (RAM)
+    # $ nova suspend NAME
+    # $ nova resume NAME
+
+    # no API commands defined yet for this challenge
+    chall_def_startChallengeAPICommandSent = []
+    chall_def_stopChallengeAPICommandSent = []
+
+    # selects start/stop_challenge_code005 (1-based index into the code lists)
+    chall_def_codeID = 5
+
+    challenge_defs.append(ChallengeDefinition(chall_def_ID, chall_def_name,
+                                              chall_def_challengeType,
+                                              chall_def_recipientID,
+                                              chall_def_impactedCloudResourcesInfo,
+                                              chall_def_impactedCloudResourceIDs,
+                                              chall_def_impactedPhysResourcesInfo,
+                                              chall_def_impactedPhysResourceIDs,
+                                              chall_def_startChallengeCLICommandSent,
+                                              chall_def_stopChallengeCLICommandSent,
+                                              chall_def_startChallengeAPICommandSent,
+                                              chall_def_stopChallengeAPICommandSent,
+                                              chall_def_codeID))
+
+    # write list to binary file
+    write_list_bin(challenge_defs, FILE_CHALLENGE_DEFINITIONS)
+
+    return challenge_defs
+
+
+######################################################################
+
+class Recipient(AutoBaseObject):
+    """Recipient class for Auto project.
+
+    A recipient is a system that receives commands (e.g. an OpenStack or
+    Kubernetes control plane), identified by address/URL plus credentials.
+    All attributes besides ID and name are optional free-form values.
+    """
+    def __init__ (self, recipient_ID, recipient_name,
+                  recipient_info,
+                  recipient_versionInfo,
+                  recipient_accessIPAddress,
+                  recipient_accessURL,
+                  recipient_userNameCreds,
+                  recipient_passwordCreds,
+                  recipient_keyCreds,
+                  recipient_networkInfo):
+
+        # superclass constructor
+        AutoBaseObject.__init__(self, recipient_ID, recipient_name)
+
+        # specifics for this subclass
+
+        # optional: free-form text info about recipient
+        self.info = recipient_info
+        # optional: version info
+        self.version_info = recipient_versionInfo
+        # optional: IP address of recipient
+        self.access_IP_address = recipient_accessIPAddress
+        # optional: URL of recipient
+        self.access_URL = recipient_accessURL
+        # optional: username for user/pwd credentials
+        self.username_creds = recipient_userNameCreds
+        # optional: password for user/pwd credentials
+        self.password_creds = recipient_passwordCreds
+        # optional: key credentials
+        self.key_creds = recipient_keyCreds
+        # optional: info about recipient's network (VPN, VCN, VN, Neutron, ...)
+        self.network_info = recipient_networkInfo
+
+
+    def printout_all(self, indent_level):
+        """Print out all attributes, with an indentation level.
+
+        NOTE(review): this prints passwords and keys in clear text —
+        consider masking credentials in any shared/logged output.
+        """
+        indent = " "*indent_level*INDENTATION_MULTIPLIER
+
+        print(indent, "Recipient ID:", self.ID, sep='')
+        print(indent, "|-name:", self.name, sep='')
+
+        print(indent, "|-version info:", self.version_info, sep='')
+        print(indent, "|-IP address:", self.access_IP_address, sep='')
+        print(indent, "|-URL:", self.access_URL, sep='')
+        print(indent, "|-username for user/pwd credentials:", self.username_creds, sep='')
+        print(indent, "|-password for user/pwd credentials:", self.password_creds, sep='')
+        print(indent, "|-key credentials:", self.key_creds, sep='')
+        print(indent, "|-info about network:", self.network_info, sep='')
+
+
+
+def init_recipients():
+    """Function to initialize recipient data.
+
+    Builds the list of known recipients (OpenStack on Arm pod, Kubernetes
+    on x86 pod), persists it to FILE_RECIPIENTS, and returns the list.
+
+    NOTE(review): credentials below appear to be placeholder/sample
+    values; real secrets must never be hard-coded in source — load them
+    from the environment or a secrets store instead.
+    """
+    test_recipients = []
+
+    # add info to list in memory, one by one, following signature values
+    recipient_ID = 1
+    recipient_name = "OpenStack on Arm pod"
+    recipient_info = "controller resolves to one of the CTL VMs"
+    recipient_versionInfo = ""
+    recipient_accessIPAddress = "172.16.10.10"
+    recipient_accessURL = ""
+    recipient_userNameCreds = "ali"
+    recipient_passwordCreds = "baba"
+    recipient_keyCreds = "ssh-rsa k7fjsnEFzESfg6phg"
+    recipient_networkInfo = "UNH IOL 172.16.0.0/22"
+
+    test_recipients.append(Recipient(recipient_ID, recipient_name,
+                                     recipient_info,
+                                     recipient_versionInfo,
+                                     recipient_accessIPAddress,
+                                     recipient_accessURL,
+                                     recipient_userNameCreds,
+                                     recipient_passwordCreds,
+                                     recipient_keyCreds,
+                                     recipient_networkInfo))
+
+    recipient_ID = 2
+    recipient_name = "Kubernetes on x86 pod"
+    recipient_info = "bare metal"
+    recipient_versionInfo = "v1.9"
+    recipient_accessIPAddress = "8.9.7.6"
+    recipient_accessURL = ""
+    recipient_userNameCreds = "kuber"
+    recipient_passwordCreds = "netes"
+    recipient_keyCreds = "ssh-rsa 0fjs7hjghsa37fhfs"
+    recipient_networkInfo = "UNH IOL 10.10.30.157/22"
+
+
+    test_recipients.append(Recipient(recipient_ID, recipient_name,
+                                     recipient_info,
+                                     recipient_versionInfo,
+                                     recipient_accessIPAddress,
+                                     recipient_accessURL,
+                                     recipient_userNameCreds,
+                                     recipient_passwordCreds,
+                                     recipient_keyCreds,
+                                     recipient_networkInfo))
+
+    # write list to binary file
+    write_list_bin(test_recipients, FILE_RECIPIENTS)
+
+    return test_recipients
+
+
+######################################################################
+
class MetricDefinition(AutoBaseObject):
    """Metric Definition class for Auto project.
    The base class holds only identification and description; actual metrics
    are subclasses that each provide a specific compute() method.
    """

    def __init__(self, metric_def_ID, metric_def_name, metric_def_info):
        """metric_def_ID / metric_def_name: forwarded to AutoBaseObject;
        metric_def_info: optional free-form text about the metric (formula, etc.)."""
        AutoBaseObject.__init__(self, metric_def_ID, metric_def_name)
        # optional: free-form text info about metric: formula, etc.
        self.info = metric_def_info

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level."""
        prefix = " " * indent_level * INDENTATION_MULTIPLIER
        # same rows and formatting as the other printout_all methods
        for label, value in (("Metric Definition ID:", self.ID),
                             ("|-name:", self.name),
                             ("|-info:", self.info)):
            print(prefix, label, value, sep='')
+
+
class MetricValue:
    """Object for storing a measurement of a Metric Definition for Auto project,
    with common attributes (value, timestamp, metric_def_ID).
    """
    def __init__ (self, param_value, param_timestamp, param_metric_def_ID):
        # measured value (type depends on the metric: timedelta, float, ...)
        self.value = param_value
        # datetime at which the value was measured
        self.timestamp = param_timestamp
        # integer ID of the MetricDefinition this value was computed from
        self.metric_def_ID = param_metric_def_ID

    def __repr__(self):
        """Unambiguous display form: metric def ID, value, timestamp (seconds precision)."""
        return ("metric_def_ID="+str(self.metric_def_ID)+
                " value="+str(self.value)+
                " timestamp="+self.timestamp.strftime("%Y-%m-%d %H:%M:%S"))

    # __str__ was a hand-written byte-for-byte duplicate of __repr__;
    # delegate instead so the two can never drift apart
    __str__ = __repr__
+
+
class RecoveryTimeDef(MetricDefinition):
    """Recovery Time Metric Definition class for Auto project.
    Formula: recovery_time = time_restoration_detected - time_challenge_started
    (duration between start of a challenge (failure, stress, ...) and detection
    of restoration). Enter values as datetime objects.
    """

    def compute(self, time_challenge_started, time_restoration_detected):
        """Return a MetricValue whose value is a timedelta (detection - start).

        time_challenge_started: datetime at which the challenge was started;
        time_restoration_detected: datetime at which restoration was detected.
        Exits the whole program if the two times are out of order.
        """
        # sanity check: restoration cannot be detected before the challenge started
        if time_challenge_started > time_restoration_detected:
            print("time_challenge_started should be <= time_restoration_detected")
            print("time_challenge_started=",time_challenge_started," time_restoration_detected=",time_restoration_detected)
            sys.exit() # stop entire program, because formulas MUST be correct

        # difference between two datetime objects is a timedelta
        return MetricValue(time_restoration_detected - time_challenge_started,
                           datetime.now(), self.ID)
+
+
class UptimePercentageDef(MetricDefinition):
    """Uptime Percentage Metric Definition class for Auto project.
    Formula: uptime / (reference_time - planned_downtime))
    Enter values in the same unit (e.g., all in seconds, or all in minutes, etc.).
    """

    def compute(self, measured_uptime, reference_time, planned_downtime):
        """Return a MetricValue with a value between 0 and 100.

        measured_uptime: amount of time the service/system/resource was up;
        reference_time: amount of time during which the measurement was made;
        planned_downtime: amount of time within reference_time planned to be down.
        Exits the whole program on any inconsistent input.
        """
        # table-driven sanity checks, evaluated in the same order as before;
        # the first violated rule prints its message and aborts the program
        rules = (
            (measured_uptime < 0.0, "measured_uptime should be >= 0.0"),
            (reference_time <= 0.0, "reference_time should be > 0.0"),
            (planned_downtime < 0.0, "planned_downtime should be >= 0.0"),
            (reference_time < planned_downtime, "reference_time should be >= planned_downtime"),
            (measured_uptime > reference_time, "measured_uptime should be <= reference_time"),
            (measured_uptime > (reference_time - planned_downtime), "measured_uptime should be <= (reference_time - planned_downtime)"),
        )
        for violated, message in rules:
            if violated:
                print(message)
                print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
                sys.exit() # stop entire program, because formulas MUST be correct

        percentage = 100 * measured_uptime / (reference_time - planned_downtime)
        return MetricValue(percentage, datetime.now(), self.ID)
+
+
+
def init_metric_definitions():
    """Build the initial list of metric definitions, persist it to the
    binary file FILE_METRIC_DEFINITIONS, and return the list."""
    # construct the definitions directly, in signature order: ID, name, info
    definitions = [
        RecoveryTimeDef(1, "Recovery Time",
                        "Measures time taken by ONAP to restore a VNF"),
        UptimePercentageDef(2, "Uptime Percentage",
                            "Measures ratio of uptime to reference time, not counting planned downtime"),
    ]

    # write list to binary file so other modules can retrieve it
    write_list_bin(definitions, FILE_METRIC_DEFINITIONS)

    return definitions
+
+
+
+######################################################################
+
class PhysicalResource(AutoBaseObject):
    """Physical Resource class for Auto project."""

    def __init__(self, phys_resrc_ID, phys_resrc_name,
                 phys_resrc_info,
                 phys_resrc_IPAddress,
                 phys_resrc_MACAddress):
        """phys_resrc_ID / phys_resrc_name: forwarded to AutoBaseObject;
        the remaining arguments are optional descriptive fields."""
        AutoBaseObject.__init__(self, phys_resrc_ID, phys_resrc_name)

        # optional: free-form text info about physical resource
        self.info = phys_resrc_info
        # optional: main IP address (e.g. management interface for a server)
        self.IP_address = phys_resrc_IPAddress
        # optional: main MAC address of physical resource
        self.MAC_address = phys_resrc_MACAddress

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level."""
        prefix = " " * indent_level * INDENTATION_MULTIPLIER
        # same rows and formatting as the other printout_all methods
        for label, value in (("Physical Resource ID:", self.ID),
                             ("|-name:", self.name),
                             ("|-info:", self.info),
                             ("|-IP address:", self.IP_address),
                             ("|-MAC address:", self.MAC_address)):
            print(prefix, label, value, sep='')
+
+
+
def init_physical_resources():
    """Build the initial list of PhysicalResource objects, persist it to the
    binary file FILE_PHYSICAL_RESOURCES, and return the list."""
    # construct the resources directly, in signature order:
    # ID, name, info, IP address, MAC address
    resources = [
        PhysicalResource(1, "small-cavium-1",
                         "Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS",
                         "10.10.50.12",
                         "00-14-22-01-23-45"),
        PhysicalResource(2, "medium-cavium-1",
                         "Jump server in New York pod, 96 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS",
                         "30.31.32.33",
                         "0xb3:22:05:c1:aa:82"),
        PhysicalResource(3, "mega-cavium-666",
                         "Jump server in Las Vegas, 1024 cores, 1024G RAM, 6666G SSD, aarch64 Cavium ThunderX, Ubuntu OS",
                         "54.53.52.51",
                         "01-23-45-67-89-ab"),
    ]

    # write list to binary file so other modules can retrieve it
    write_list_bin(resources, FILE_PHYSICAL_RESOURCES)

    return resources
+
+
+######################################################################
+
class CloudVirtualResource(AutoBaseObject):
    """Cloud Virtual Resource class for Auto project."""
    def __init__ (self, cldvirtres_ID, cldvirtres_name,
                  cldvirtres_info,
                  cldvirtres_IPAddress,
                  cldvirtres_URL,
                  cldvirtres_related_phys_rsrcIDs):
        """cldvirtres_ID / cldvirtres_name: forwarded to AutoBaseObject;
        the remaining arguments are optional descriptive fields."""

        # superclass constructor
        AutoBaseObject.__init__(self, cldvirtres_ID, cldvirtres_name)

        # specifics for this subclass

        # optional: free-form text info about cloud virtual resource
        self.info = cldvirtres_info
        # optional: main IP address (e.g. management interface for a virtual router)
        self.IP_address = cldvirtres_IPAddress
        # optional: URL address of cloud virtual resource
        self.URL = cldvirtres_URL
        # optional: related/associated physical resources (list of integer IDs)
        self.related_phys_rsrc_ID_list = cldvirtres_related_phys_rsrcIDs

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level;
        also recursively prints any related physical resources."""
        indent = " "*indent_level*INDENTATION_MULTIPLIER

        print(indent, "Cloud Virtual Resource ID:", self.ID, sep='')
        print(indent, "|-name:", self.name, sep='')

        print(indent, "|-info:", self.info, sep='')
        print(indent, "|-IP address:", self.IP_address, sep='')
        print(indent, "|-URL:", self.URL, sep='')

        # idiomatic truthiness test replaces "!= None" plus an explicit length
        # check: None and an empty list are both skipped, exactly as before
        if self.related_phys_rsrc_ID_list:
            print(indent, "|-related/associated physical resource(s):", sep='')
            for phys_resource_ID in self.related_phys_rsrc_ID_list:
                phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)
                if phys_resource_item is not None:
                    phys_resource_item.printout_all(indent_level+1)
+
+
def init_cloud_virtual_resources():
    """Build the initial list of CloudVirtualResource objects, persist it to
    the binary file FILE_CLOUD_RESOURCES, and return the list."""
    # construct the resources directly, in signature order:
    # ID, name, info, IP address, URL, related physical resource IDs
    resources = [
        CloudVirtualResource(1, "nova-compute-1",
                             "nova VM in Arm pod",
                             "50.60.70.80",
                             "http://50.60.70.80:8080",
                             [1,3]),
        CloudVirtualResource(2, "nova-compute-2",
                             "nova VM in LaaS",
                             "50.60.70.80",
                             "http://50.60.70.80:8080",
                             [2,3]),
        CloudVirtualResource(3, "nova-compute-3",
                             "nova VM in x86 pod",
                             "50.60.70.80",
                             "http://50.60.70.80:8080",
                             [1]),
    ]

    # write list to binary file so other modules can retrieve it
    write_list_bin(resources, FILE_CLOUD_RESOURCES)

    return resources
+
+
+######################################################################
+
class VNFService(AutoBaseObject):
    """VNF or e2e Service class for Auto project."""
    def __init__ (self, vnf_serv_ID, vnf_serv_name,
                  vnf_serv_info,
                  vnf_serv_IPAddress,
                  vnf_serv_URL,
                  vnf_serv_related_phys_rsrcIDs,
                  vnf_serv_related_cloudvirt_rsrcIDs):
        """vnf_serv_ID / vnf_serv_name: forwarded to AutoBaseObject;
        the remaining arguments are optional descriptive fields."""

        # superclass constructor
        AutoBaseObject.__init__(self, vnf_serv_ID, vnf_serv_name)

        # specifics for this subclass

        # optional: free-form text info about VNF / e2e Service
        self.info = vnf_serv_info
        # optional: main IP address (e.g. management interface for a vCPE)
        self.IP_address = vnf_serv_IPAddress
        # optional: URL address of VNF / e2e Service
        self.URL = vnf_serv_URL
        # optional: related/associated physical resources (list of integer IDs)
        self.related_phys_rsrc_ID_list = vnf_serv_related_phys_rsrcIDs
        # optional: related/associated cloud virtual resources (list of integer IDs)
        self.related_cloud_virt_rsrc_ID_list = vnf_serv_related_cloudvirt_rsrcIDs

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level;
        also recursively prints related physical and cloud virtual resources."""
        indent = " "*indent_level*INDENTATION_MULTIPLIER

        print(indent, "VNF or e2e Service ID:", self.ID, sep='')
        print(indent, "|-name:", self.name, sep='')

        print(indent, "|-info:", self.info, sep='')
        print(indent, "|-IP address:", self.IP_address, sep='')
        print(indent, "|-URL:", self.URL, sep='')

        # idiomatic truthiness tests replace "!= None" plus explicit length
        # checks: None and an empty list are both skipped, exactly as before
        if self.related_phys_rsrc_ID_list:
            print(indent, "|-related/associated physical resource(s):", sep='')
            for phys_resource_ID in self.related_phys_rsrc_ID_list:
                phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)
                if phys_resource_item is not None:
                    phys_resource_item.printout_all(indent_level+1)

        if self.related_cloud_virt_rsrc_ID_list:
            print(indent, "|-related/associated cloud virtual resource(s):", sep='')
            for cloud_resource_ID in self.related_cloud_virt_rsrc_ID_list:
                cloud_resource_item = get_indexed_item_from_list(cloud_resource_ID, AutoResilGlobal.cloud_virtual_resource_list)
                if cloud_resource_item is not None:
                    cloud_resource_item.printout_all(indent_level+1)
+
+
+
def init_VNFs_Services():
    """Build the initial list of VNFService objects, persist it to the
    binary file FILE_VNFS_SERVICES, and return the list."""
    # construct the entries directly, in signature order: ID, name, info,
    # IP address, URL, related physical IDs, related cloud virtual IDs
    vnfs_services = [
        VNFService(1, "vCPE-1",
                   "virtual CPE in Arm pod",
                   "5.4.3.2",
                   "http://5.4.3.2:8080",
                   [1,2],
                   [1]),
        VNFService(2, "vFW-1",
                   "virtual Firewall in x86 pod",
                   "6.7.8.9",
                   "http://6.7.8.9:8080",
                   [3],
                   [2,3]),
    ]

    # write list to binary file so other modules can retrieve it
    write_list_bin(vnfs_services, FILE_VNFS_SERVICES)

    return vnfs_services
+
+
+
+######################################################################
+
class TimeStampedStringList:
    """This is a utility class for Auto project, for execution classes
    (ChallengeExecution and TestExecution).
    It stores a list of strings and timestamps each one as it is appended.
    """
    def __init__ (self):
        # parallel lists: __timestamp_list[i] is the append time of __string_list[i]
        self.__string_list = []
        self.__timestamp_list = []

    def append_to_list(self, string_to_append):
        """Append a string to the list and record the current time.
        Exits the whole program if the argument is not a string.
        """
        # FIX: isinstance() instead of "type(x)==str", so str subclasses are
        # accepted too; exact-type comparison was an anti-idiom
        if isinstance(string_to_append, str):
            current_time = datetime.now()
            self.__string_list.append(string_to_append)
            self.__timestamp_list.append(current_time)  # same index as its string
        else:
            print("appended object must be a string, string_to_append=",string_to_append)
            sys.exit() # stop entire program, because string MUST be correct

    def get_raw_list(self):
        """Return the underlying list of strings (no timestamps)."""
        return self.__string_list

    def get_raw_list_timestamps(self):
        """Return the underlying list of timestamps (datetime objects)."""
        return self.__timestamp_list

    def get_timestamped_strings(self):
        """Return a list of strings with timestamps as prefixes (not showing microseconds)."""
        # zip the parallel lists instead of a manual index-based while loop
        return [stamp.strftime("%Y-%m-%d %H:%M:%S") + " " + text
                for stamp, text in zip(self.__timestamp_list, self.__string_list)]

    def length(self):
        """Return the number of stored strings."""
        return len(self.__string_list)
+
+
+######################################################################
+
class ChallengeExecution(AutoBaseObject):
    """Class for Auto project, tracking the execution details of a Challenge Definition,
    with a method to dump all results to a CSV file.
    """
    def __init__ (self, chall_exec_ID, chall_exec_name,
                  chall_exec_challDefID):
        """Create a challenge execution record.

        chall_exec_ID: integer ID of this execution instance;
        chall_exec_name: free-form name of this execution instance;
        chall_exec_challDefID: integer ID of the Challenge Definition being executed.
        """

        # superclass constructor (common ID and name attributes)
        AutoBaseObject.__init__(self, chall_exec_ID, chall_exec_name)

        # specifics for this subclass

        # associated Challenge Definition (ID)
        self.challenge_def_ID = chall_exec_challDefID

        # attributes getting values during execution

        # associated Start and Stop times (when Challenge was started and stopped);
        # both remain None until set by the caller
        self.start_time = None
        self.stop_time = None
        # log: list of strings, to capture any interesting or significant event
        self.log = TimeStampedStringList()
        # list of CLI responses
        self.CLI_responses = TimeStampedStringList()
        # list of API responses (converted to strings)
        self.API_responses = TimeStampedStringList()

    def write_to_csv(self):
        """Generic function to dump all Challenge Execution data in a CSV file.
        Output file is challDefExec<ID, 3 digits>-<start time>.csv in the current
        directory; requires start_time to be set. Exits the program on any
        file-writing error.
        """

        dump_list = []

        # add rows one by one, each as a list, even if only 1 element

        dump_list.append(["challenge execution ID",self.ID])
        dump_list.append(["challenge execution name",self.name])

        dump_list.append(["challenge definition ID",self.challenge_def_ID])
        # NOTE(review): despite the variable name, this receives whatever item
        # get_indexed_item_from_file returns for this ID — presumably its str()
        # is what lands in the CSV; confirm against that helper
        challenge_def_name = get_indexed_item_from_file(self.challenge_def_ID, FILE_CHALLENGE_DEFINITIONS)
        dump_list.append(["challenge definition name",challenge_def_name])

        # time rows only written if the times were actually set
        if self.start_time != None:
            dump_list.append(["challenge start time",self.start_time.strftime("%Y-%m-%d %H:%M:%S")])
        if self.stop_time != None:
            dump_list.append(["challenge stop time",self.stop_time.strftime("%Y-%m-%d %H:%M:%S")])

        # each log / response entry becomes its own single-cell row
        if self.log.length() > 0 :
            dump_list.append(["Log:"])
            for item in self.log.get_timestamped_strings():
                dump_list.append([item])

        if self.CLI_responses.length() > 0 :
            dump_list.append(["CLI responses:"])
            for item in self.CLI_responses.get_timestamped_strings():
                dump_list.append([item])

        if self.API_responses.length() > 0 :
            dump_list.append(["API responses:"])
            for item in self.API_responses.get_timestamped_strings():
                dump_list.append([item])

        try:
            # output CSV file name: challDefExec + ID (3 digits) + start time + .csv
            file_name = "challDefExec" + "{0:0=3d}".format(self.challenge_def_ID) + "-" + self.start_time.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
            with open(file_name, "w", newline="") as file:
                csv_file_writer = csv.writer(file)
                csv_file_writer.writerows(dump_list)
        except Exception as e:
            print(type(e), e)
            sys.exit()
+
+
+
+######################################################################
+
class TimeStampedMetricValueList:
    """This is a utility class for Auto project, for the test execution class
    (TestExecution). It stores a list of MetricValue objects, each of which
    already carries its own timestamp attribute.
    """
    def __init__ (self):
        self.__metric_value_list = []

    def append_to_list(self, metric_value_to_append):
        """Append a MetricValue to the list; exits the whole program on any other type."""
        # exact-type check kept deliberately: only MetricValue itself is accepted
        if type(metric_value_to_append)==MetricValue:
            self.__metric_value_list.append(metric_value_to_append)
        else:
            print("appended object must be a MetricValue, metric_value_to_append=",metric_value_to_append)
            sys.exit() # stop entire program, because metric_value_to_append MUST be correct

    def get_raw_list(self):
        """Return the underlying list of MetricValue objects."""
        return self.__metric_value_list

    def get_timestamped_metric_values_as_strings(self):
        """Return a list of strings, each "<timestamp> <value>(<metric def ID>)",
        timestamps shown without microseconds."""
        return [entry.timestamp.strftime("%Y-%m-%d %H:%M:%S") + " " +
                str(entry.value) + "(" + str(entry.metric_def_ID) + ")"
                for entry in self.__metric_value_list]

    def length(self):
        """Return the number of stored metric values."""
        return len(self.__metric_value_list)
+
+
+
+######################################################################
+
class TestExecution(AutoBaseObject):
    """Class for Auto project, tracking the execution details of a Test Definition,
    with a method to dump all results to a CSV file.
    """
    def __init__ (self, test_exec_ID, test_exec_name,
                  test_exec_testDefID,
                  test_exec_challengeExecID,
                  test_exec_userID):
        """Create a test execution record.

        test_exec_ID: integer ID of this execution instance;
        test_exec_name: free-form name of this execution instance;
        test_exec_testDefID: integer ID of the Test Definition being executed;
        test_exec_challengeExecID: integer ID of the associated Challenge Execution;
        test_exec_userID: ID of the user running the test.
        """

        # superclass constructor
        AutoBaseObject.__init__(self, test_exec_ID, test_exec_name)

        # specifics for this subclass

        # associated Test Definition (ID)
        self.test_def_ID = test_exec_testDefID
        # associated Challenge Execution (ID) (execution instance of a challenge
        # definition); get challenge start time from it
        self.challenge_exec_ID = test_exec_challengeExecID
        # associated User (ID)
        self.user_ID = test_exec_userID

        # attributes getting values during execution

        # associated Start and Finish times (when test was started and finished)
        self.start_time = None
        self.finish_time = None
        # time when the challenge was started [datetime]; same value as associated
        # ChallengeExecution.start_time; kept as a copy here for print convenience
        self.challenge_start_time = None
        # time when the VNF/service restoration (by ONAP) was detected by the test code [datetime]
        self.restoration_detection_time = None
        # key metric: recovery time, defined as time elapsed between start of challenge
        # and restoration detection [MetricValue wrapping a timedelta]
        self.recovery_time = None
        # list of associated metric values
        self.associated_metric_values = TimeStampedMetricValueList()
        # log: list of strings, to capture any interesting or significant event
        self.log = TimeStampedStringList()
        # list of CLI responses
        self.CLI_responses = TimeStampedStringList()
        # list of API responses (converted to strings)
        self.API_responses = TimeStampedStringList()


    def write_to_csv(self):
        """Generic function to dump all Test Execution data in a CSV file.
        Output file is testDefExec<ID, 3 digits>-<start time>.csv in the current
        directory; requires start_time to be set. Exits the program on any
        file-writing error.
        """

        dump_list = []

        # add rows one by one, each as a list, even if only 1 element

        dump_list.append(["test execution ID",self.ID])
        dump_list.append(["test execution name",self.name])

        dump_list.append(["test definition ID",self.test_def_ID])
        test_def_name = get_indexed_item_from_file(self.test_def_ID, FILE_TEST_DEFINITIONS)
        dump_list.append(["test definition name",test_def_name])

        dump_list.append(["associated challenge execution ID",self.challenge_exec_ID])
        dump_list.append(["user ID",self.user_ID])

        # time rows only written if the times were actually set
        if self.start_time is not None:
            dump_list.append(["test start time",self.start_time.strftime("%Y-%m-%d %H:%M:%S")])

        if self.finish_time is not None:
            dump_list.append(["test finish time",self.finish_time.strftime("%Y-%m-%d %H:%M:%S")])

        if self.challenge_start_time is not None:
            # BUG FIX: this row was labeled "challenge stop time" (copy-paste from
            # ChallengeExecution.write_to_csv), but the value written is the
            # challenge START time
            dump_list.append(["challenge start time",self.challenge_start_time.strftime("%Y-%m-%d %H:%M:%S")])
        if self.restoration_detection_time is not None:
            dump_list.append(["restoration detection time",self.restoration_detection_time.strftime("%Y-%m-%d %H:%M:%S")])
        if self.recovery_time is not None and self.recovery_time.value is not None:
            if type(self.recovery_time.value)==timedelta:
                # timedelta: days and seconds are attributes, total_seconds() is a method
                dump_list.append(["MEASURED RECOVERY TIME (s)",self.recovery_time.value.total_seconds()])
                rtday = self.recovery_time.value.days
                rthrs = self.recovery_time.value.seconds // 3600
                rtmin = (self.recovery_time.value.seconds % 3600) // 60
                rtsec = self.recovery_time.value.seconds % 60
                rtmil = self.recovery_time.value.microseconds
                dump_list.append(["MEASURED RECOVERY TIME (days, hours, mins, seconds, microseconds)",
                                  rtday, rthrs, rtmin, rtsec, rtmil])

        # each metric value / log / response entry becomes its own single-cell row
        if self.associated_metric_values.length() > 0 :
            dump_list.append(["Metric Values:"])
            for item in self.associated_metric_values.get_timestamped_metric_values_as_strings():
                dump_list.append([item])

        if self.log.length() > 0 :
            dump_list.append(["Log:"])
            for item in self.log.get_timestamped_strings():
                dump_list.append([item])

        if self.CLI_responses.length() > 0 :
            dump_list.append(["CLI responses:"])
            for item in self.CLI_responses.get_timestamped_strings():
                dump_list.append([item])

        if self.API_responses.length() > 0 :
            dump_list.append(["API responses:"])
            for item in self.API_responses.get_timestamped_strings():
                dump_list.append([item])

        try:
            # output CSV file name: testDefExec + ID (3 digits) + start time + .csv
            file_name = "testDefExec" + "{0:0=3d}".format(self.test_def_ID) + "-" + self.start_time.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
            with open(file_name, "w", newline="") as file:
                csv_file_writer = csv.writer(file)
                csv_file_writer.writerows(dump_list)
        except Exception as e:
            print(type(e), e)
            sys.exit()
+
+
+######################################################################
def dump_all_binaries_to_CSV():
    """Get all content from all Definition data binary files, and dump everything in a snapshot CSV file."""
    ## TODO: not implemented yet — the function currently only captures the
    ## intended snapshot time; timenow is unused until the dump is written
    timenow = datetime.now()
+
+######################################################################
def main():
    """Unit-test driver for this module: exercises every init_* function,
    the metric definitions, and the execution/logging classes in sequence.
    Not part of the actual test-run code paths."""

    # everything here is for unit-testing of this module; not part of actual code

    # --- test cases: init, add one, read back by index ---
    tcs = init_test_cases()
    print(tcs)

    test_case_ID = 33
    test_case_name = "auto-resiliency-xyz"
    test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-400"
    add_test_case_to_file(test_case_ID, test_case_name, test_case_JIRA_URL)
    print(read_list_bin(FILE_TEST_CASES))

    # ID 257 is expected to be absent — exercises the not-found path
    print(get_indexed_item_from_file(3,FILE_TEST_CASES))
    print(get_indexed_item_from_file(257,FILE_TEST_CASES))

    print("tcs[4]=",tcs[4])
    print(tcs[4].ID)
    print(tcs[4].name)
    print(tcs[4].JIRA_URL)

    print()

    # --- challenge definitions: init, fetch one, run its start/stop code ---
    challgs = init_challenge_definitions()
    print(challgs)
    chall = get_indexed_item_from_file(1,FILE_CHALLENGE_DEFINITIONS)
    print(chall)
    chall.run_start_challenge_code()
    chall.run_stop_challenge_code()

    print()

    # --- test definitions ---
    tds = init_test_definitions()
    print(tds)
    td = get_indexed_item_from_file(1,FILE_TEST_DEFINITIONS)
    print(td)
    #td.printout_all(0)
    #td.run_test_code()

    print()

    # --- recipients ---
    rcps = init_recipients()
    print(rcps)
    rcp = get_indexed_item_from_file(1,FILE_RECIPIENTS)
    print(rcp)

    print()

    # --- metric definitions: Recovery Time (ID=1) and Uptime Percentage (ID=2) ---
    metricdefs = init_metric_definitions()
    print(metricdefs)

    metricdef = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS)
    print(metricdef)
    t1 = datetime(2018,4,1,15,10,12,500000)
    t2 = datetime(2018,4,1,15,13,43,200000)
    r1 = metricdef.compute(t1,t2)
    print(r1)
    print()

    metricdef = get_indexed_item_from_file(2,FILE_METRIC_DEFINITIONS)
    print(metricdef)
    r1 = metricdef.compute(735, 1000, 20)
    r2 = metricdef.compute(980, 1000, 20)
    r3 = metricdef.compute(920.0, 1000.0, 0.0)
    r4 = metricdef.compute(920.0, 1500.0, 500.0)
    r5 = metricdef.compute(919.99999, 1000.0, 0.000001)
    print(r1)
    print(r2)
    print(r3)
    print(r4)
    print(r5)

    print()

    # --- physical, cloud virtual, and VNF/service resources ---
    physRs = init_physical_resources()
    print(physRs)
    physR = get_indexed_item_from_file(1,FILE_PHYSICAL_RESOURCES)
    print(physR)

    print()

    cloudRs = init_cloud_virtual_resources()
    print(cloudRs)
    cloudR = get_indexed_item_from_file(1,FILE_CLOUD_RESOURCES)
    print(cloudR)

    print()

    VNFs = init_VNFs_Services()
    print(VNFs)
    VNF = get_indexed_item_from_file(1,FILE_VNFS_SERVICES)
    print(VNF)

    print()

    # --- challenge execution: logs, CLI/API responses, CSV dump ---
    ce1 = ChallengeExecution(1,"essai challenge execution",1)
    ce1.start_time = datetime.now()
    ce1.log.append_to_list("challenge execution log event 1")
    ce1.log.append_to_list("challenge execution log event 2")
    ce1.CLI_responses.append_to_list("challenge execution CLI response 1")
    ce1.log.append_to_list("challenge execution log event 3")
    ce1.CLI_responses.append_to_list("challenge execution CLI response 2")
    ce1.log.append_to_list("challenge execution log event 4")
    ce1.log.append_to_list("challenge execution log event 5")
    ce1.API_responses.append_to_list("challenge execution API response 1")
    ce1.log.append_to_list("challenge execution log event 6")
    print("log length: ", ce1.log.length())
    print(ce1.log.get_timestamped_strings())
    print("CLI_responses length: ", ce1.CLI_responses.length())
    print(ce1.CLI_responses.get_timestamped_strings())
    print("API_responses length: ", ce1.API_responses.length())
    print(ce1.API_responses.get_timestamped_strings())
    ce1.stop_time = datetime.now()
    ce1.write_to_csv()

    print()

    # --- test execution: metrics, recovery time, CSV dump ---
    te1 = TestExecution(1,"essai test execution",1,1,"Gerard")
    te1.start_time = datetime.now()
    te1.challenge_start_time = ce1.start_time # illustrate how to set test execution challenge start time
    print("te1.challenge_start_time:",te1.challenge_start_time)

    te1.log.append_to_list("test execution log event 1")
    te1.log.append_to_list("test execution log event 2")
    te1.CLI_responses.append_to_list("test execution CLI response 1")
    te1.CLI_responses.append_to_list("test execution CLI response 2")

    metricdef = get_indexed_item_from_file(2,FILE_METRIC_DEFINITIONS) # get a metric definition, some ID
    print(metricdef)
    r1 = metricdef.compute(735, 1000, 20) # compute a metric value
    print(r1)
    te1.associated_metric_values.append_to_list(r1) # append a measured metric value to test execution
    r1 = metricdef.compute(915, 1000, 20) # compute a metric value
    print(r1)
    te1.associated_metric_values.append_to_list(r1) # append a measured metric value to test execution

    te1.log.append_to_list("test execution log event 3")
    te1.API_responses.append_to_list("test execution API response 1")

    print("log length: ", te1.log.length())
    print(te1.log.get_timestamped_strings())
    print("CLI_responses length: ", te1.CLI_responses.length())
    print(te1.CLI_responses.get_timestamped_strings())
    print("API_responses length: ", te1.API_responses.length())
    print(te1.API_responses.get_timestamped_strings())
    print("associated_metric_values length: ", te1.associated_metric_values.length())
    print(te1.associated_metric_values.get_timestamped_metric_values_as_strings())

    te1.restoration_detection_time = datetime.now()
    print("te1.restoration_detection_time:",te1.restoration_detection_time)
    metricdef = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS) # get Recovery Time metric definition: ID=1
    print(metricdef)
    r1 = metricdef.compute(te1.challenge_start_time, te1.restoration_detection_time) # compute a metric value, for Recovery time
    te1.recovery_time = r1 # assignment could be direct, i.e. te1.recovery_time = metricdef.compute(...)

    te1.finish_time = datetime.now() # test execution is finished
    te1.write_to_csv()

    print()

    print("\nCiao")

if __name__ == "__main__":
    main()
+
+
+
+
+
+
diff --git a/lib/auto/testcase/resiliency/AutoResilRunTest.py b/lib/auto/testcase/resiliency/AutoResilRunTest.py
new file mode 100644
index 0000000..1364b4a
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilRunTest.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: execution of tests
+# (might merge this module with Main module)
+## Receive/retrieve chosen test def info
+##pre-test (pings, etc.)
+##launch test:
+## create execution instances of Test and Challenge
+## simulate challenge
+## get time T1
+## loop:
+## wait for VNF recovery
+## optional other metrics
+## store data and logs
+## get time T2
+## stop challenge
+## reset (with ONAP MSO)
+## store data and logs
+##post-tests
+##logs
+
+
+######################################################################
+# import statements
+import AutoResilGlobal
+
+def f1():
+ return 0
+
+
+
+
diff --git a/lib/auto/testcase/resiliency/clouds.yaml b/lib/auto/testcase/resiliency/clouds.yaml
new file mode 100644
index 0000000..593a07c
--- /dev/null
+++ b/lib/auto/testcase/resiliency/clouds.yaml
@@ -0,0 +1,91 @@
+clouds:
+
+ # Openstack instance on Arm pod, controller IP@ 172.16.10.10
+ # Horizon: https://10.10.50.103/project/
+ # Identity API according to Horizon dashboard: https://10.10.50.103:5000/v2.0
+ # other potential auth_url: http://172.16.10.10:35357/v3
+ # (OS_AUTH_URL=http://controller:35357/v3)
+ # 2 project names: admin, service (project = tenant)
+ # project ID: 122caf64b3df4818bf2ce5ba793226b2
+ # EC2 URL: https://10.10.50.103:8773/services/Cloud
+ # EC2 access key: bcf3c69a7d1c405e9757f87f26faf19f
+ # 10.10.50.0/8: floating IP@
+ # 10.10.10.0/8: fixed IP@
+ armopenstack:
+ auth:
+ auth_url: https://10.10.50.103:5000/v2.0
+ username: admin
+ password: opnfv_secret
+ project_name: admin
+ region_name: RegionOne
+
+ # Openstack instance on LaaS hpe16, from OPNFV Euphrates, controller IP@ (mgt: 172.16.10.101; public: 10.16.0.101)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.101:35357/v2.0
+ # internal: http://172.16.10.101:5000/v2.0
+ # public: http://10.16.0.101:5000/v2.0 : works on LaaS hpe16, from hpe16
+ hpe16openstackEuphrates:
+ auth:
+ auth_url: http://10.16.0.101:5000/v2.0
+ username: admin
+ password: opnfv_secret
+ project_name: admin
+ region_name: RegionOne
+
+ # Openstack instance on LaaS hpe16, from OPNFV Fraser, controller IP@ (mgt: 172.16.10.36; public: 10.16.0.107)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.36:35357/v3
+ # internal: http://172.16.10.36:5000/v3
+ # public: http://10.16.0.107:5000/v3
+ hpe16openstackFraser:
+ auth:
+ auth_url: http://10.16.0.107:5000/v3
+ username: admin
+ password: opnfv_secret
+ project_name: admin
+ region_name: RegionOne
+
+# ubuntu@ctl01:~$ openstack project show admin
+# +-------------+----------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------+
+# | description | OpenStack Admin tenant |
+# | domain_id | default |
+# | enabled | True |
+# | id | 04fcfe7aa83f4df79ae39ca748aa8637 |
+# | is_domain | False |
+# | name | admin |
+# | parent_id | default |
+# +-------------+----------------------------------+
+
+# (openstack) domain show default
+# +-------------+----------------------------------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------------------------------+
+# | description | Domain created automatically to support V2.0 operations. |
+# | enabled | True |
+# | id | default |
+# | name | Default |
+# +-------------+----------------------------------------------------------+
+
+# (openstack) domain show heat_user_domain
+# +-------------+---------------------------------------------+
+# | Field | Value |
+# +-------------+---------------------------------------------+
+# | description | Contains users and projects created by heat |
+# | enabled | True |
+# | id | d9c29adac0fe4816922d783b257879d6 |
+# | name | heat_user_domain |
+# +-------------+---------------------------------------------+
+
+# NOTE: the following openrc-style shell exports are kept for reference only.
+# They are commented out because bare `export` statements at the root of a
+# YAML document make clouds.yaml unparseable.
+# export OS_AUTH_URL=http://10.16.0.107:5000/v3
+# export OS_PROJECT_ID=04fcfe7aa83f4df79ae39ca748aa8637
+# export OS_PROJECT_NAME="admin"
+# export OS_USER_DOMAIN_NAME="Default"
+# export OS_USERNAME="admin"
+# export OS_PASSWORD="opnfv_secret"
+# export OS_REGION_NAME="RegionOne"
+# export OS_INTERFACE=public
+# export OS_IDENTITY_API_VERSION=3
+
+
diff --git a/lib/auto/testcase/vnf/vbng/MANIFEST.json b/lib/auto/testcase/vnf/vbng/MANIFEST.json
new file mode 100644
index 0000000..0b34111
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbng/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vbng.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vbng.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env
new file mode 100644
index 0000000..be4f972
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env
@@ -0,0 +1,35 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ brgemu_bng_private_net_id: zdfw1bngin01_private
+ brgemu_bng_private_subnet_id: zdfw1bngin01_sub_private
+ bng_gmux_private_net_id: zdfw1bngmux01_private
+ bng_gmux_private_subnet_id: zdfw1bngmux01_sub_private
+ onap_private_net_id: PUT THE ONAP PRIVATE NETWORK NAME HERE
+ onap_private_subnet_id: PUT THE ONAP PRIVATE SUBNETWORK NAME HERE
+ onap_private_net_cidr: 10.0.0.0/16
+ cpe_signal_net_id: zdfw1cpe01_private
+ cpe_signal_subnet_id: zdfw1cpe01_sub_private
+ brgemu_bng_private_net_cidr: 10.3.0.0/24
+ bng_gmux_private_net_cidr: 10.1.0.0/24
+ cpe_signal_private_net_cidr: 10.4.0.0/24
+ vbng_private_ip_0: 10.3.0.1
+ vbng_private_ip_1: 10.0.101.10
+ vbng_private_ip_2: 10.4.0.3
+ vbng_private_ip_3: 10.1.0.10
+ vbng_name_0: zdcpe1cpe01bng01
+ vnf_id: vCPE_Infrastructure_Metro_vBNG_demo_app
+ vf_module_id: vCPE_Intrastructure_Metro_vBNG
+ dcae_collector_ip: 10.0.4.102
+ dcae_collector_port: 8080
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vbng_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-radius-client-for-vbng/src/patches/Vpp-Integrate-FreeRADIUS-Client-for-vBNG.patch
diff --git a/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml
new file mode 100644
index 0000000..3dd7ca0
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml
@@ -0,0 +1,288 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE virtual Broadband Network Gateway (vBNG)
+
+##############
+# #
+# PARAMETERS #
+# #
+##############
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ brgemu_bng_private_net_id:
+ type: string
+ label: vBNG IN private network name or ID
+ description: Private network that connects vBRG to vBNG
+ brgemu_bng_private_subnet_id:
+ type: string
+ label: vBNG IN private sub-network name or ID
+ description: vBNG IN private sub-network name or ID
+ brgemu_bng_private_net_cidr:
+ type: string
+ label: vBNG IN private network CIDR
+ description: The CIDR of the input side of vBNG private network
+ bng_gmux_private_net_id:
+ type: string
+ label: vBNG vGMUX private network name or ID
+ description: Private network that connects vBNG to vGMUX
+ bng_gmux_private_subnet_id:
+ type: string
+ label: vBNG vGMUX private sub-network name or ID
+ description: vBNG vGMUX private sub-network name or ID
+ bng_gmux_private_net_cidr:
+ type: string
+ label: vGMUX private network CIDR
+ description: The CIDR of the input side of vGMUX private network
+ onap_private_net_id:
+ type: string
+ label: ONAP management network name or ID
+ description: Private network that connects ONAP components and the VNF
+ onap_private_subnet_id:
+ type: string
+ label: ONAP management sub-network name or ID
+ description: Private sub-network that connects ONAP components and the VNF
+ onap_private_net_cidr:
+ type: string
+ label: ONAP private network CIDR
+ description: The CIDR of the protected private network
+ cpe_signal_net_id:
+ type: string
+ label: vCPE private network name or ID
+ description: Private network that connects vCPE elements with vCPE infrastructure elements
+ cpe_signal_subnet_id:
+ type: string
+ label: vCPE private sub-network name or ID
+ description: vCPE private sub-network name or ID
+ cpe_signal_private_net_cidr:
+ type: string
+ label: vAAA private network CIDR
+ description: The CIDR of the vAAA private network
+ vbng_private_ip_0:
+ type: string
+ label: vBNG IN private IP address
+ description: Private IP address that is assigned to the vBNG IN
+ vbng_private_ip_1:
+ type: string
+ label: vBNG private IP address towards the ONAP management network
+ description: Private IP address that is assigned to the vBNG to communicate with ONAP components
+ vbng_private_ip_2:
+ type: string
+ label: vBNG to CPE_SIGNAL private IP address
+ description: Private IP address that is assigned to the vBNG in the CPE_SIGNAL network
+ vbng_private_ip_3:
+ type: string
+ label: vBNG to vGMUX private IP address
+ description: Private IP address that is assigned to the vBNG to vGMUX port
+ vbng_name_0:
+ type: string
+ label: vBNG name
+ description: Name of the vBNG
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ dcae_collector_ip:
+ type: string
+ label: DCAE collector IP address
+ description: IP address of the DCAE collector
+ dcae_collector_port:
+ type: string
+ label: DCAE collector port
+ description: Port of the DCAE collector
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vFW demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ vpp_patch_url:
+ type: string
+ label: VPP Patch URL
+ description: URL for VPP patch for vBNG
+
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+
+ # Virtual BNG Instantiation
+ vbng_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: brgemu_bng_private_net_id }
+ fixed_ips: [{"subnet": { get_param: brgemu_bng_private_subnet_id }, "ip_address": { get_param: vbng_private_ip_0 }}]
+
+ vbng_private_1_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: onap_private_net_id }
+ fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vbng_private_ip_1 }}]
+
+ vbng_private_2_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: cpe_signal_net_id }
+ fixed_ips: [{"subnet": { get_param: cpe_signal_subnet_id }, "ip_address": { get_param: vbng_private_ip_2 }}]
+
+ vbng_private_3_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: bng_gmux_private_net_id }
+ fixed_ips: [{"subnet": { get_param: bng_gmux_private_subnet_id }, "ip_address": { get_param: vbng_private_ip_3 }}]
+
+ vbng_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vbng_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vbng_private_0_port }
+ - port: { get_resource: vbng_private_1_port }
+ - port: { get_resource: vbng_private_2_port }
+ - port: { get_resource: vbng_private_3_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __oam_ipaddr__: { get_param: vbng_private_ip_1 }
+ __brgemu_bng_net_ipaddr__: { get_param: vbng_private_ip_0 }
+ __cpe_signal_net_ipaddr__: { get_param: vbng_private_ip_2 }
+ __bng_gmux_net_ipaddr__: { get_param: vbng_private_ip_3 }
+ __oam_cidr__: { get_param: onap_private_net_cidr }
+ __brgemu_bng_cidr__: { get_param: brgemu_bng_private_net_cidr }
+ __cpe_signal_cidr__: { get_param: cpe_signal_private_net_cidr }
+ __bng_gmux_cidr__: { get_param: bng_gmux_private_net_cidr }
+ __dcae_collector_ip__: { get_param: dcae_collector_ip }
+ __dcae_collector_port__: { get_param: dcae_collector_port }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __vpp_patch_url__ : { get_param: vpp_patch_url }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ echo "__brgemu_bng_net_ipaddr__" > /opt/config/brgemu_bng_net_ipaddr.txt
+ echo "__cpe_signal_net_ipaddr__" > /opt/config/cpe_signal_net_ipaddr.txt
+ echo "__bng_gmux_net_ipaddr__" > /opt/config/bng_gmux_net_ipaddr.txt
+ echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt
+ echo "__oam_cidr__" > /opt/config/oam_cidr.txt
+ echo "__bng_gmux_cidr__" > /opt/config/bng_gmux_net_cidr.txt
+ echo "__cpe_signal_cidr__" > /opt/config/cpe_signal_net_cidr.txt
+ echo "__brgemu_bng_cidr__" > /opt/config/brgemu_bng_net_cidr.txt
+ echo "__dcae_collector_ip__" > /opt/config/dcae_collector_ip.txt
+ echo "__dcae_collector_port__" > /opt/config/dcae_collector_port.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__vpp_patch_url__" > /opt/config/vpp_patch_url.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_bng_install.sh -o /opt/v_bng_install.sh
+ cd /opt
+ chmod +x v_bng_install.sh
+ ./v_bng_install.sh
diff --git a/lib/auto/testcase/vnf/vbrgemu/MANIFEST.json b/lib/auto/testcase/vnf/vbrgemu/MANIFEST.json
new file mode 100644
index 0000000..3911256
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbrgemu/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vbrgemu.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vbrgemu.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env
new file mode 100644
index 0000000..7719f55
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env
@@ -0,0 +1,28 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 or vbrg-base-ubuntu-16-04 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ compile_state: PUT THE COMPILE STATE (done, auto or build)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ vbrgemu_bng_private_net_id: zdfw1bngin01_private
+ vbrgemu_bng_private_subnet_id: zdfw1bngin01_sub_private
+ vbrgemu_bng_private_net_cidr: 10.3.0.0/24
+ #vbrgemu_private_net_id: zdfw1vbrgemu01_private
+ #vbrgemu_private_net_cidr: 192.168.1.0/24
+ vbrgemu_private_ip_0: 10.3.0.4
+ #vbrgemu_private_ip_1: 192.168.1.1
+ sdnc_ip: 10.0.7.1
+ vbrgemu_name_0: zdcpe1cpe01brgemu01
+ vnf_id: vCPE_Infrastructure_BGREMU_demo_app
+ vf_module_id: vCPE_Customer_BRGEMU
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vbrgemu_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ hc2vpp_source_repo_url: https://gerrit.fd.io/r/hc2vpp
+ hc2vpp_source_repo_branch: stable/1704
+ vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-option-82-for-vbrg/src/patches/VPP-Add-Option82-Nat-Filter-For-vBRG.patch
diff --git a/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml
new file mode 100644
index 0000000..a786995
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml
@@ -0,0 +1,253 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE vBRG Emulator (vBRGEMU)
+
+#######################################################################
+# #
+# PARAMETERS #
+# #
+# 0_port should get IP address from DHCP discover through vBNG #
+# DCAE is not monitoring the BRGEMULATOR #
+#######################################################################
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ vbrgemu_bng_private_net_id:
+ type: string
+ label: vBNG private network name or ID
+ description: Private network that connects vBRGEMU to vBNG
+ vbrgemu_bng_private_subnet_id:
+ type: string
+ label: vBNG private sub-network name or ID
+ description: vBNG private sub-network name or ID
+ vbrgemu_bng_private_net_cidr:
+ type: string
+ label: vBNG IN private network CIDR
+ description: The CIDR of the input side of vBNG private network
+ # vbrgemu_private_net_id:
+ # type: string
+ # label: vBRGEMU Home private network name or ID
+ # description: Private network that connects vBRGEMU to local devices
+ #vbrgemu_private_net_cidr:
+ # type: string
+ # label: vBRGEMU Home private network CIDR
+ # description: The CIDR of the input side of vBRGEMU Home private network
+ vbrgemu_private_ip_0:
+ type: string
+ label: vGW private IP address
+ description: Private IP address towards the BRGEMU-BNG network
+ #vbrgemu_private_ip_1:
+ # type: string
+ # label: vGW private IP address
+ # description: Private IP address towards the BRGEMU private network
+ vbrgemu_name_0:
+ type: string
+ label: vGW name
+ description: Name of the vGW
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vFW demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ hc2vpp_source_repo_url:
+ type: string
+ label: Honeycomb Source Git Repo
+ description: URL for Honeycomb source codes
+ hc2vpp_source_repo_branch:
+ type: string
+ label: Honeycomb Source Git Branch
+ description: Git Branch for the Honeycomb source codes
+ vpp_patch_url:
+ type: string
+ label: VPP Patch URL
+ description: URL for VPP patch for vBRG Emulator
+ sdnc_ip:
+ type: string
+ label: SDNC ip address
+ description: SDNC ip address used to set NAT
+ compile_state:
+ type: string
+ label: Compile State
+ description: State to compile code or not
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+ #vbrgemu_private_network:
+ # type: OS::Neutron::Net
+ # properties:
+ # name: { get_param: vbrgemu_private_net_id }
+
+ #vbrgemu_private_subnet:
+ # type: OS::Neutron::Subnet
+ # properties:
+ # name: { get_param: vbrgemu_private_net_id }
+ # network_id: { get_resource: vbrgemu_private_network }
+ # cidr: { get_param: vbrgemu_private_net_cidr }
+
+ # Virtual BRG Emulator Instantiation
+ # 0_port should get IP address from DHCP discover through vBNG once the VNF is running
+ vbrgemu_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: vbrgemu_bng_private_net_id }
+ fixed_ips: [{"subnet": { get_param: vbrgemu_bng_private_subnet_id }, "ip_address": { get_param: vbrgemu_private_ip_0 }}]
+
+ #vbrgemu_private_1_port:
+ # type: OS::Neutron::Port
+ # properties:
+ # network: { get_resource: vbrgemu_private_network }
+ # fixed_ips: [{"subnet": { get_resource: vbrgemu_private_subnet }, "ip_address": { get_param: vbrgemu_private_ip_1 }}]
+
+ vbrgemu_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vbrgemu_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vbrgemu_private_0_port }
+ #- port: { get_resource: vbrgemu_private_1_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ # __brgemu_net_ipaddr__: { get_param: vbrgemu_private_ip_1 }
+ # __brgemu_cidr__: { get_param: vbrgemu_private_net_cidr }
+ __brgemu_bng_private_net_cidr__: { get_param: vbrgemu_bng_private_net_cidr }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __hc2vpp_source_repo_url__ : { get_param: hc2vpp_source_repo_url }
+ __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
+ __vpp_patch_url__ : { get_param: vpp_patch_url }
+ __sdnc_ip__ : { get_param: sdnc_ip }
+ __compile_state__ : { get_param: compile_state }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ #echo "__brgemu_net_ipaddr__" > /opt/config/brgemu_net_ipaddr.txt
+ #echo "__brgemu_cidr__" > /opt/config/brgemu_net_cidr.txt
+ echo "__brgemu_bng_private_net_cidr__" > /opt/config/brgemu_bng_private_net_cidr.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
+ echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
+ echo "__vpp_patch_url__" > /opt/config/vpp_patch_url.txt
+ echo "__sdnc_ip__" > /opt/config/sdnc_ip.txt
+ echo "__compile_state__" > /opt/config/compile_state.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_brgemu_install.sh -o /opt/v_brgemu_install.sh
+ cd /opt
+ chmod +x v_brgemu_install.sh
+ ./v_brgemu_install.sh
diff --git a/lib/auto/testcase/vnf/vgmux/MANIFEST.json b/lib/auto/testcase/vnf/vgmux/MANIFEST.json
new file mode 100644
index 0000000..1f62167
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgmux/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vgmux.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vgmux.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env
new file mode 100644
index 0000000..e81afa7
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env
@@ -0,0 +1,35 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ bng_gmux_private_net_id: zdfw1bngmux01_private
+ bng_gmux_private_subnet_id: zdfw1bngmux01_sub_private
+ mux_gw_private_net_id: zdfw1muxgw01_private
+ mux_gw_private_subnet_id: zdfw1muxgw01_sub_private
+ onap_private_net_id: PUT THE ONAP PRIVATE NETWORK NAME HERE
+ onap_private_subnet_id: PUT THE ONAP PRIVATE SUBNETWORK NAME HERE
+ onap_private_net_cidr: 10.0.0.0/16
+ bng_gmux_private_net_cidr: 10.1.0.0/24
+ mux_gw_private_net_cidr: 10.5.0.0/24
+ vgmux_private_ip_0: 10.1.0.20
+ vgmux_private_ip_1: 10.0.101.20
+ vgmux_private_ip_2: 10.5.0.20
+ vgmux_name_0: zdcpe1cpe01mux01
+ vnf_id: vCPE_Infrastructure_vGMUX_demo_app
+ vf_module_id: vCPE_Intrastructure_Metro_vGMUX
+ dcae_collector_ip: 10.0.4.102
+ dcae_collector_port: 8080
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vgmux_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ hc2vpp_source_repo_url: https://gerrit.fd.io/r/hc2vpp
+ hc2vpp_source_repo_branch: stable/1704
+ vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/Vpp-Add-VES-agent-for-vG-MUX.patch
+ hc2vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/Hc2vpp-Add-VES-agent-for-vG-MUX.patch
+ libevel_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/vCPE-vG-MUX-libevel-fixup.patch
diff --git a/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml
new file mode 100644
index 0000000..ecdb1b1
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml
@@ -0,0 +1,281 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE Infrastructure Metro vGMUX
+
+##############
+# #
+# PARAMETERS #
+# #
+##############
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ bng_gmux_private_net_id:
+ type: string
+ label: vBNG vGMUX private network name or ID
+ description: Private network that connects vBNG to vGMUX
+ bng_gmux_private_subnet_id:
+ type: string
+ label: vBNG vGMUX private sub-network name or ID
+ description: vBNG vGMUX private sub-network name or ID
+ bng_gmux_private_net_cidr:
+ type: string
+ label: vBNG vGMUX private network CIDR
+ description: The CIDR of the vBNG-vGMUX private network
+ mux_gw_private_net_id:
+ type: string
+ label: vGMUX vGWs network name or ID
+ description: Private network that connects vGMUX to vGWs
+ mux_gw_private_subnet_id:
+ type: string
+ label: vGMUX vGWs sub-network name or ID
+ description: vGMUX vGWs sub-network name or ID
+ mux_gw_private_net_cidr:
+ type: string
+ label: vGMUX private network CIDR
+ description: The CIDR of the vGMUX private network
+ onap_private_net_id:
+ type: string
+ label: ONAP management network name or ID
+ description: Private network that connects ONAP components and the VNF
+ onap_private_subnet_id:
+ type: string
+ label: ONAP management sub-network name or ID
+ description: Private sub-network that connects ONAP components and the VNF
+ onap_private_net_cidr:
+ type: string
+ label: ONAP private network CIDR
+ description: The CIDR of the protected private network
+ vgmux_private_ip_0:
+ type: string
+ label: vGMUX private IP address towards the vBNG-vGMUX private network
+ description: Private IP address that is assigned to the vGMUX to communicate with the vBNG
+ vgmux_private_ip_1:
+ type: string
+ label: vGMUX private IP address towards the ONAP management network
+ description: Private IP address that is assigned to the vGMUX to communicate with ONAP components
+ vgmux_private_ip_2:
+ type: string
+ label: vGMUX private IP address towards the vGMUX-vGW private network
+ description: Private IP address that is assigned to the vGMUX to communicate with vGWs
+ vgmux_name_0:
+ type: string
+ label: vGMUX name
+ description: Name of the vGMUX
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ dcae_collector_ip:
+ type: string
+ label: DCAE collector IP address
+ description: IP address of the DCAE collector
+ dcae_collector_port:
+ type: string
+ label: DCAE collector port
+ description: Port of the DCAE collector
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vGMUX demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ hc2vpp_source_repo_url:
+ type: string
+ label: Honeycomb Source Git Repo
+ description: URL for Honeycomb source codes
+ hc2vpp_source_repo_branch:
+ type: string
+ label: Honeycomb Source Git Branch
+ description: Git Branch for the Honeycomb source codes
+ vpp_patch_url:
+ type: string
+ label: VPP Patch URL
+ description: URL for VPP patch for vG-MUX
+ hc2vpp_patch_url:
+ type: string
+ label: Honeycomb Patch URL
+ description: URL for Honeycomb patch for vG-MUX
+ libevel_patch_url:
+ type: string
+ label: libevel Patch URL
+ description: URL for libevel patch for vG-MUX
+
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+
+ # Virtual GMUX Instantiation
+ vgmux_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: bng_gmux_private_net_id }
+ fixed_ips: [{"subnet": { get_param: bng_gmux_private_subnet_id }, "ip_address": { get_param: vgmux_private_ip_0 }}]
+
+ vgmux_private_1_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: onap_private_net_id }
+ fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vgmux_private_ip_1 }}]
+
+ vgmux_private_2_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: mux_gw_private_net_id }
+ fixed_ips: [{"subnet": { get_param: mux_gw_private_subnet_id }, "ip_address": { get_param: vgmux_private_ip_2 }}]
+
+ vgmux_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vgmux_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vgmux_private_0_port }
+ - port: { get_resource: vgmux_private_1_port }
+ - port: { get_resource: vgmux_private_2_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __bng_mux_net_ipaddr__ : { get_param: vgmux_private_ip_0 }
+ __oam_ipaddr__ : { get_param: vgmux_private_ip_1 }
+ __mux_gw_net_ipaddr__ : { get_param: vgmux_private_ip_2 }
+ __bng_mux_net_cidr__ : { get_param: bng_gmux_private_net_cidr }
+ __oam_cidr__ : { get_param: onap_private_net_cidr }
+ __mux_gw_net_cidr__ : { get_param: mux_gw_private_net_cidr }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __hc2vpp_source_repo_url__ : { get_param: hc2vpp_source_repo_url }
+ __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
+ __vpp_patch_url__ : { get_param: vpp_patch_url }
+ __hc2vpp_patch_url__ : { get_param: hc2vpp_patch_url }
+ __libevel_patch_url__ : { get_param: libevel_patch_url }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ echo "__bng_mux_net_ipaddr__" > /opt/config/bng_mux_net_ipaddr.txt
+ echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt
+ echo "__mux_gw_net_ipaddr__" > /opt/config/mux_gw_net_ipaddr.txt
+ echo "__bng_mux_net_cidr__" > /opt/config/bng_mux_net_cidr.txt
+ echo "__oam_cidr__" > /opt/config/oam_cidr.txt
+ echo "__mux_gw_net_cidr__" > /opt/config/mux_gw_net_cidr.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__vpp_patch_url__" > /opt/config/vpp_patch_url.txt
+ echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
+ echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
+ echo "__hc2vpp_patch_url__" > /opt/config/hc2vpp_patch_url.txt
+ echo "__libevel_patch_url__" > /opt/config/libevel_patch_url.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_gmux_install.sh -o /opt/v_gmux_install.sh
+ cd /opt
+ chmod +x v_gmux_install.sh
+ ./v_gmux_install.sh
diff --git a/lib/auto/testcase/vnf/vgw/MANIFEST.json b/lib/auto/testcase/vnf/vgw/MANIFEST.json
new file mode 100644
index 0000000..8178b1e
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgw/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vgw.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vgw.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env
new file mode 100644
index 0000000..f1cadb8
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env
@@ -0,0 +1,32 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ mux_gw_private_net_id: zdfw1muxgw01_private
+ mux_gw_private_subnet_id: zdfw1muxgw01_sub_private
+ mux_gw_private_net_cidr: 10.5.0.0/24
+ cpe_public_net_id: zdfw1cpe01_public
+ cpe_public_subnet_id: zdfw1cpe01_sub_public
+ cpe_public_net_cidr: 10.2.0.0/24
+ onap_private_net_id: PUT THE ONAP PRIVATE NETWORK NAME HERE
+ onap_private_subnet_id: PUT THE ONAP PRIVATE SUBNETWORK NAME HERE
+ onap_private_net_cidr: 10.0.0.0/16
+ vgw_private_ip_0: 10.5.0.21
+ vgw_private_ip_1: 10.0.101.30
+ vgw_private_ip_2: 10.2.0.3
+ vgw_name_0: zdcpe1cpe01gw01
+ vnf_id: vCPE_Infrastructure_GW_demo_app
+ vf_module_id: vCPE_Customer_GW
+ dcae_collector_ip: 10.0.4.102
+ dcae_collector_port: 8080
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vgw_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ hc2vpp_source_repo_url: https://gerrit.fd.io/r/hc2vpp
+ hc2vpp_source_repo_branch: stable/1704
diff --git a/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml
new file mode 100644
index 0000000..173ba6d
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml
@@ -0,0 +1,261 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE vGateway (vG)
+
+##############
+# #
+# PARAMETERS #
+# #
+##############
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ mux_gw_private_net_id:
+ type: string
+ label: vGMUX private network name or ID
+ description: Private network that connects vGMUX to vGWs
+ mux_gw_private_subnet_id:
+ type: string
+ label: vGMUX private sub-network name or ID
+ description: vGMUX private sub-network name or ID
+ mux_gw_private_net_cidr:
+ type: string
+ label: vGMUX private network CIDR
+ description: The CIDR of the vGMUX private network
+ onap_private_net_id:
+ type: string
+ label: ONAP management network name or ID
+ description: Private network that connects ONAP components and the VNF
+ onap_private_subnet_id:
+ type: string
+ label: ONAP management sub-network name or ID
+ description: Private sub-network that connects ONAP components and the VNF
+ onap_private_net_cidr:
+ type: string
+ label: ONAP private network CIDR
+ description: The CIDR of the protected private network
+ cpe_public_net_id:
+ type: string
+ label: vCPE network that emulates the internet - name or ID
+ description: Private network that connects vGW to emulated internet
+ cpe_public_subnet_id:
+ type: string
+ label: vCPE Public subnet
+ description: vCPE Public subnet
+ cpe_public_net_cidr:
+ type: string
+ label: vCPE public network CIDR
+ description: The CIDR of the vCPE public network
+ vgw_private_ip_0:
+ type: string
+ label: vGW private IP address towards the vGMUX
+ description: Private IP address that is assigned to the vGW to communicate with vGMUX
+ vgw_private_ip_1:
+ type: string
+ label: vGW private IP address towards the ONAP management network
+ description: Private IP address that is assigned to the vGW to communicate with ONAP components
+ vgw_private_ip_2:
+ type: string
+ label: vGW private IP address towards the vCPE public network
+ description: Private IP address that is assigned to the vGW to communicate with vCPE public network
+ vgw_name_0:
+ type: string
+ label: vGW name
+ description: Name of the vGW
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ dcae_collector_ip:
+ type: string
+ label: DCAE collector IP address
+ description: IP address of the DCAE collector
+ dcae_collector_port:
+ type: string
+ label: DCAE collector port
+ description: Port of the DCAE collector
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vGW demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ hc2vpp_source_repo_url:
+ type: string
+ label: Honeycomb Source Git Repo
+ description: URL for Honeycomb source codes
+ hc2vpp_source_repo_branch:
+ type: string
+ label: Honeycomb Source Git Branch
+ description: Git Branch for the Honeycomb source codes
+
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+ # Virtual GW Instantiation
+ vgw_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: mux_gw_private_net_id }
+ fixed_ips: [{"subnet": { get_param: mux_gw_private_subnet_id }, "ip_address": { get_param: vgw_private_ip_0 }}]
+
+ vgw_private_1_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: onap_private_net_id }
+ fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vgw_private_ip_1 }}]
+
+ vgw_private_2_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: cpe_public_net_id}
+ fixed_ips: [{"subnet": { get_param: cpe_public_subnet_id }, "ip_address": { get_param: vgw_private_ip_2 }}]
+
+ vgw_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vgw_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vgw_private_0_port }
+ - port: { get_resource: vgw_private_1_port }
+ - port: { get_resource: vgw_private_2_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __mux_gw_private_net_ipaddr__ : { get_param: vgw_private_ip_0 }
+ __oam_ipaddr__ : { get_param: vgw_private_ip_1 }
+ __oam_cidr__ : { get_param: onap_private_net_cidr }
+ __cpe_public_net_cidr__ : { get_param: cpe_public_net_cidr }
+ __mux_gw_private_net_cidr__ : { get_param: mux_gw_private_net_cidr }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __hc2vpp_source_repo_url__ : { get_param: hc2vpp_source_repo_url }
+ __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt
+ echo "__oam_cidr__" > /opt/config/oam_cidr.txt
+ echo "__cpe_public_net_cidr__" > /opt/config/cpe_public_net_cidr.txt
+ echo "__mux_gw_private_net_ipaddr__" > /opt/config/mux_gw_private_net_ipaddr.txt
+ echo "__mux_gw_private_net_cidr__" > /opt/config/mux_gw_private_net_cidr.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
+ echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_gw_install.sh -o /opt/v_gw_install.sh
+ cd /opt
+ chmod +x v_gw_install.sh
+ ./v_gw_install.sh
+
diff --git a/lib/auto/util/__init__.py b/lib/auto/util/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/auto/util/__init__.py
diff --git a/lib/auto/util/openstack_lib.py b/lib/auto/util/openstack_lib.py
new file mode 100644
index 0000000..4b62b72
--- /dev/null
+++ b/lib/auto/util/openstack_lib.py
@@ -0,0 +1,332 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""Module to manage OpenStack"""
+
+import os
+import re
+import sys
+import time
+import traceback
+
+from keystoneauth1 import loading
+from keystoneauth1 import session
+from keystoneclient import client as keystoneclient
+from glanceclient import client as glanceclient
+from neutronclient.neutron import client as neutronclient
+from novaclient import client as novaclient
+from heatclient import client as heatclient
+
# Module author metadata.
__author__ = "Harry Huang <huangxiangyu5@huawei.com>"

# Fallback API versions used when the matching OS_*_API_VERSION
# environment variable is not set.
DEFAULT_API_VERSION = '2'
DEFAULT_ORCHESTRATION_API_VERSION = '1'

# openrc environment variables required for every identity API version.
openrc_base_key = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD']

# Additional variables required when the identity API version is 3.
openrc_v3_exkey = ['OS_PROJECT_NAME',
                   'OS_USER_DOMAIN_NAME',
                   'OS_PROJECT_DOMAIN_NAME']

# Additional variables required when the identity API version is 2.
openrc_v2_exkey = ['OS_TENANT_NAME']

# Mapping from openrc environment variable names to the keyword-argument
# names expected by keystoneauth1's password plugin loader.
openrc_vars_mapping = {
    'OS_USERNAME': 'username',
    'OS_PASSWORD': 'password',
    'OS_AUTH_URL': 'auth_url',
    'OS_TENANT_NAME': 'tenant_name',
    'OS_USER_DOMAIN_NAME': 'user_domain_name',
    'OS_PROJECT_DOMAIN_NAME': 'project_domain_name',
    'OS_PROJECT_NAME': 'project_name',
}
+
+
def check_identity_api_version():
    """Determine the Keystone (identity) API version to use.

    The version is derived from the trailing path component of OS_AUTH_URL
    (e.g. ".../v3" -> "3", a trailing slash is tolerated). When
    OS_IDENTITY_API_VERSION is also set, the two must agree.

    Returns:
        str: identity API version embedded in the auth URL (e.g. "2.0", "3").

    Raises:
        RuntimeError: if OS_AUTH_URL is unset, or OS_IDENTITY_API_VERSION is
            set but disagrees with the version in the URL.
    """
    identity_api_version = os.getenv('OS_IDENTITY_API_VERSION')
    auth_url = os.getenv('OS_AUTH_URL')
    if not auth_url:
        raise RuntimeError("Require env var: OS_AUTH_URL")
    auth_url_parse = auth_url.split('/')
    # Pick the last non-empty path component (handles a trailing '/').
    url_tail = auth_url_parse[-1] if auth_url_parse[-1] else auth_url_parse[-2]
    url_identity_version = url_tail.strip('v')
    # Bug fix: the original tested "not identity_api_version", which raised
    # "not consistent" whenever the env var was simply unset. Enforce the
    # consistency check only when OS_IDENTITY_API_VERSION is actually set.
    if identity_api_version and \
            identity_api_version != url_identity_version:
        raise RuntimeError("identity api version not consistent")
    return url_identity_version
+
+
def check_image_api_version():
    """Return OS_IMAGE_API_VERSION if set (non-empty), else the default."""
    return os.getenv('OS_IMAGE_API_VERSION') or DEFAULT_API_VERSION
+
+
def check_network_api_version():
    """Return OS_NETWORK_API_VERSION if set (non-empty), else the default."""
    return os.getenv('OS_NETWORK_API_VERSION') or DEFAULT_API_VERSION
+
+
def check_compute_api_version():
    """Return OS_COMPUTE_API_VERSION if set (non-empty), else the default."""
    return os.getenv('OS_COMPUTE_API_VERSION') or DEFAULT_API_VERSION
+
+
def check_orchestration_api_version():
    """Return OS_ORCHESTRATION_API_VERSION if set (non-empty), else the default."""
    return os.getenv('OS_ORCHESTRATION_API_VERSION') or \
        DEFAULT_ORCHESTRATION_API_VERSION
+
+
def get_project_name(creds):
    """Return the project (identity v3) or tenant (v2) name from creds.

    Args:
        creds: credentials dict as produced by get_credentials().

    Raises:
        RuntimeError: for any identity API version other than '2' or '3'.
    """
    identity_version = check_identity_api_version()
    if identity_version not in ('2', '3'):
        raise RuntimeError("Unsupported identity version")
    key = "project_name" if identity_version == '3' else "tenant_name"
    return creds[key]
+
+
def get_credentials():
    """Assemble OpenStack auth credentials from the openrc environment.

    Reads the base OS_* variables plus the identity-version-specific extras
    and maps them to the keyword names expected by keystoneauth1's password
    plugin loader.

    Returns:
        dict: credential kwargs (username, password, auth_url, ...).

    Raises:
        RuntimeError: if the identity version is unsupported or a required
            environment variable is missing.
    """
    creds = {}
    identity_api_version = check_identity_api_version()

    # Bug fix: copy the base list instead of aliasing it. The original
    # "creds_env_key = openrc_base_key" followed by "+=" mutated the
    # module-level openrc_base_key, so every call appended the version
    # extras again, growing the shared list.
    creds_env_key = list(openrc_base_key)

    if identity_api_version == '3':
        creds_env_key += openrc_v3_exkey
    elif identity_api_version == '2':
        creds_env_key += openrc_v2_exkey
    else:
        raise RuntimeError("Unsupported identity version")

    for env_key in creds_env_key:
        env_value = os.getenv(env_key)
        if env_value is None:
            raise RuntimeError("Require env var: %s" % env_key)
        creds[openrc_vars_mapping.get(env_key)] = env_value

    return creds
+
+
def get_session_auth(creds):
    """Build a keystoneauth1 'password' auth plugin from a credentials dict."""
    plugin_loader = loading.get_plugin_loader('password')
    return plugin_loader.load_from_options(**creds)
+
+
def get_session(creds):
    """Create a keystoneauth1 Session from a credentials dict.

    TLS verification honours OS_CACERT (path to a CA bundle) and
    OS_INSECURE=true (disables verification); otherwise verification is on.
    """
    cacert = os.getenv('OS_CACERT')
    if cacert:
        verify = cacert
    else:
        verify = os.getenv('OS_INSECURE', '').lower() != 'true'
    return session.Session(auth=get_session_auth(creds), verify=verify)
+
+
def get_keystone_client(creds):
    """Instantiate a Keystone client on the configured identity API version.

    The endpoint interface comes from OS_INTERFACE (default: 'admin').
    """
    return keystoneclient.Client(
        check_identity_api_version(),
        session=get_session(creds),
        interface=os.getenv('OS_INTERFACE', 'admin'))
+
+
def get_glance_client(creds):
    """Instantiate a Glance client on the configured image API version."""
    return glanceclient.Client(check_image_api_version(),
                               session=get_session(creds))
+
+
def get_neutron_client(creds):
    """Instantiate a Neutron client on the configured network API version."""
    return neutronclient.Client(check_network_api_version(),
                                session=get_session(creds))
+
+
def get_nova_client(creds):
    """Instantiate a Nova client on the configured compute API version."""
    return novaclient.Client(check_compute_api_version(),
                             session=get_session(creds))
+
+
def get_heat_client(creds):
    """Instantiate a Heat client on the configured orchestration API version."""
    return heatclient.Client(check_orchestration_api_version(),
                             session=get_session(creds))
+
+
def get_domain_id(keystone_client, domain_name):
    """Look up a Keystone domain id by name.

    Returns:
        The id of the first domain whose name matches, or None.
    """
    for domain in keystone_client.domains.list():
        if domain.name == domain_name:
            return domain.id
    return None
+
+
def get_project_id(keystone_client, project_name):
    """Look up a project (identity v3) / tenant (v2) id by name.

    Returns:
        The id of the first match, or None when not found.

    Raises:
        RuntimeError: for unsupported identity API versions.
    """
    identity_version = check_identity_api_version()
    if identity_version == '3':
        candidates = keystone_client.projects.list()
    elif identity_version == '2':
        candidates = keystone_client.tenants.list()
    else:
        raise RuntimeError("Unsupported identity version")
    for project in candidates:
        if project.name == project_name:
            return project.id
    return None
+
+
def get_image_id(glance_client, image_name):
    """Return the id of the first Glance image named image_name, or None."""
    for image in glance_client.images.list():
        if image.name == image_name:
            return image.id
    return None
+
+
def get_network_id(neutron_client, network_name):
    """Return the id of the first Neutron network named network_name, or None."""
    for network in neutron_client.list_networks()['networks']:
        if network['name'] == network_name:
            return network['id']
    return None
+
+
def get_security_group_id(neutron_client, secgroup_name, project_id=None):
    """Look up a security-group id by name, preferring a project match.

    When project_id is given and several groups share the name, the group
    belonging to that project wins; otherwise the last name match (in the
    order Neutron returns them) is kept as a fallback, preserving the
    original lookup behaviour.

    Returns:
        The security-group id, or None when no group has the given name.
        (Bug fix: the original returned an empty list [] on no match, which
        was inconsistent with the None returned by the other get_*_id
        helpers; both are falsy, so callers are unaffected.)
    """
    secgroup_id = None
    groups = neutron_client.list_security_groups()['security_groups']
    for security_group in groups:
        if security_group['name'] != secgroup_name:
            continue
        secgroup_id = security_group['id']
        # Check project_id first so a None filter never touches the
        # 'project_id' key (robust against payloads lacking it).
        if project_id is None or security_group['project_id'] == project_id:
            break
    return secgroup_id
+
+
def get_secgroup_rule_id(neutron_client, secgroup_id, json_body):
    """Find an existing security-group rule matching json_body.

    Every key present in json_body['security_group_rule'] must equal the
    corresponding field of a rule returned by Neutron. Note: secgroup_id is
    accepted for signature compatibility but is not used in the comparison
    (the security_group_id is expected inside json_body when relevant).

    Returns:
        The matching rule's id, or None.
    """
    wanted = json_body['security_group_rule'].items()
    rules = neutron_client.list_security_group_rules()['security_group_rules']
    for rule in rules:
        if all(rule[key] == value for key, value in wanted):
            return rule['id']
    return None
+
+
def get_keypair_id(nova_client, keypair_name):
    """Return the id of the first Nova keypair named keypair_name, or None."""
    for keypair in nova_client.keypairs.list():
        if keypair.name == keypair_name:
            return keypair.id
    return None
+
+
def create_project(keystone_client, creds, project_name, project_desc):
    """Create a project (identity v3) or tenant (v2), idempotently.

    When a project with the same name already exists its id is returned and
    nothing is created.

    Args:
        keystone_client: Keystone client.
        creds: credentials dict; 'user_domain_name' is read for v3.
        project_name: name of the project to create.
        project_desc: human-readable description.

    Returns:
        The project id (existing or newly created).

    Raises:
        RuntimeError: for unsupported identity API versions.
    """
    existing_id = get_project_id(keystone_client, project_name)
    if existing_id:
        return existing_id

    identity_version = check_identity_api_version()
    if identity_version == '3':
        # v3 projects live inside the user's domain.
        domain_id = get_domain_id(keystone_client, creds["user_domain_name"])
        project = keystone_client.projects.create(
            name=project_name,
            description=project_desc,
            domain=domain_id,
            enabled=True)
    elif identity_version == '2':
        project = keystone_client.tenants.create(project_name,
                                                 project_desc,
                                                 enabled=True)
    else:
        raise RuntimeError("Unsupported identity version")

    return project.id
+
+
def create_image(glance_client, image_name, image_path, disk_format="qcow2",
                 container_format="bare", visibility="public"):
    """Register and upload an image into Glance, idempotently.

    If an image with the same name already exists, its id is returned and
    nothing is uploaded.

    Args:
        glance_client: Glance client.
        image_name: name to register the image under.
        image_path: local path of the image file.
        disk_format: image disk format (e.g. "qcow2", "raw").
        container_format: image container format (e.g. "bare").
        visibility: image visibility (e.g. "public", "private").

    Returns:
        The image id (existing or newly created).

    Raises:
        RuntimeError: if image_path does not point to a regular file.
    """
    if not os.path.isfile(image_path):
        raise RuntimeError("Image file not found: %s" % image_path)
    image_id = get_image_id(glance_client, image_name)
    if not image_id:
        image = glance_client.images.create(name=image_name,
                                            visibility=visibility,
                                            disk_format=disk_format,
                                            container_format=container_format)
        image_id = image.id
        # Bug fix: open the image in binary mode. Disk images are not text;
        # the original text-mode open corrupts the upload and raises a
        # decode error on Python 3.
        with open(image_path, 'rb') as image_data:
            glance_client.images.upload(image_id, image_data)
    return image_id
+
+
def create_secgroup_rule(neutron_client, secgroup_id, protocol, direction,
                         port_range_min=None, port_range_max=None):
    """Create a security-group rule unless an identical one already exists.

    Args:
        neutron_client: Neutron client.
        secgroup_id: id of the security group the rule belongs to.
        protocol: e.g. 'tcp', 'udp', 'icmp'.
        direction: 'ingress' or 'egress'.
        port_range_min: optional start of the port range.
        port_range_max: optional end of the port range; must be supplied
            together with port_range_min.

    Returns:
        The id of a pre-existing identical rule, or a falsy value when a
        new rule was created (matching the original behaviour).

    Raises:
        RuntimeError: if only one end of the port range is supplied.
    """
    json_body = {'security_group_rule': {'direction': direction,
                                         'security_group_id': secgroup_id,
                                         'protocol': protocol}}

    # Bug fix: compare against None rather than truthiness so a legitimate
    # value of 0 (e.g. an ICMP type carried in port_range_min) is not
    # mistaken for "absent".
    if (port_range_min is None) != (port_range_max is None):
        raise RuntimeError("Start or end of protocol range is empty: [ %s, %s ]"
                           % (port_range_min, port_range_max))
    elif port_range_min is not None:
        json_body['security_group_rule'].update(
            {'port_range_min': port_range_min,
             'port_range_max': port_range_max})

    # Use a distinct local for the rule id: the original reused the
    # secgroup_id parameter, which made the code misleading.
    rule_id = get_secgroup_rule_id(neutron_client, secgroup_id, json_body)
    if not rule_id:
        neutron_client.create_security_group_rule(json_body)
    return rule_id
+
+
def update_compute_quota(nova_client, project_id, quotas):
    """Apply compute quota overrides for a project.

    Args:
        nova_client: Nova client.
        project_id: project whose quotas are updated.
        quotas: dict mapping quota names to values (e.g. {'cores': 20}).
    """
    nova_client.quotas.update(project_id, **quotas)
+
+
def create_keypair(nova_client, keypair_name, keypair_path):
    """Register an SSH public key as a Nova keypair, idempotently.

    Args:
        nova_client: Nova client.
        keypair_name: name for the keypair.
        keypair_path: path to the public key file ('~' is expanded).

    Returns:
        The keypair id (existing or newly created).
    """
    keypair_id = get_keypair_id(nova_client, keypair_name)
    if not keypair_id:
        # Bug fix: read as bytes and decode explicitly. The original called
        # .decode('utf-8') on text-mode data, which only works on Python 2
        # and raises AttributeError on Python 3.
        with open(os.path.expanduser(keypair_path), 'rb') as public_key:
            key_data = public_key.read().decode('utf-8')
        keypair = nova_client.keypairs.create(name=keypair_name,
                                              public_key=key_data)
        keypair_id = keypair.id
    return keypair_id
+
diff --git a/lib/auto/util/util.py b/lib/auto/util/util.py
new file mode 100644
index 0000000..0033900
--- /dev/null
+++ b/lib/auto/util/util.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""Utility Module"""
+
+import os
+import git
+import urllib
+import yaml
+import traceback
+from Crypto.PublicKey import RSA
+from yaml_type import literal_unicode
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+
def folded_unicode_representer(dumper, data):
    """Emit `data` as a YAML folded-block (>) string scalar."""
    str_tag = u'tag:yaml.org,2002:str'
    return dumper.represent_scalar(str_tag, data, style='>')
+
+
def literal_unicode_representer(dumper, data):
    """Emit `data` as a YAML literal-block (|) string scalar."""
    str_tag = u'tag:yaml.org,2002:str'
    return dumper.represent_scalar(str_tag, data, style='|')
+
+
def unicode_representer(dumper, uni):
    """Wrap a unicode value in a plain (default style) YAML string
    ScalarNode; the `dumper` argument is unused but required by the
    representer signature."""
    return yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
+
+
def mkdir(path):
    """Create directory `path` (including parents) if it does not exist.

    :param path: directory path; surrounding whitespace and trailing
        backslashes are stripped first
    :return: True if this call created the directory, False if it
        already existed
    """
    path = path.strip().rstrip("\\")
    if os.path.exists(path):
        return False
    try:
        os.makedirs(path)
        return True
    except OSError:
        # Lost a creation race: another process made the directory
        # between the exists() check and makedirs().
        if os.path.isdir(path):
            return False
        raise
+
+
def download(url, file_path):
    """Fetch `url` into `file_path` unless the file already exists.

    :param url: source URL
    :param file_path: destination path
    :return: True when a download was performed, False when skipped

    NOTE(review): urllib.urlretrieve is the Python 2 API; on Python 3
    this would need urllib.request.urlretrieve — confirm the target
    interpreter before porting.
    """
    if os.path.exists(file_path):
        return False
    else:
        urllib.urlretrieve(url, file_path)
        return True
+
+
def git_clone(git_repo, git_branch, clone_path):
    """Clone `git_branch` of `git_repo` into `clone_path` if not present.

    No-op when `clone_path` already exists; does NOT verify that an
    existing directory is a clone of the requested repo/branch.
    """
    if not os.path.exists(clone_path):
        git.Repo.clone_from(git_repo, clone_path, branch=git_branch)
+
+
def read_file(file_path):
    """Return the full contents of `file_path` (with ~ expansion)."""
    expanded = os.path.expanduser(file_path)
    with open(expanded) as handle:
        return handle.read()
+
+
def read_yaml(yaml_path):
    """Parse the YAML file at `yaml_path` (with ~ expansion) using the
    safe loader and return the resulting object."""
    expanded = os.path.expanduser(yaml_path)
    with open(expanded) as handle:
        return yaml.safe_load(handle)
+
+
def write_yaml(yaml_data, yaml_path, default_style=False):
    """Dump `yaml_data` to `yaml_path`, rendering literal_unicode values
    as literal (|) block scalars.

    :param yaml_data: object to serialize
    :param yaml_path: destination file (~ expanded)
    :param default_style: passed to yaml.dump as default_flow_style

    NOTE(review): registering a representer for `unicode` only works on
    Python 2 (the name does not exist on Python 3) — confirm the target
    interpreter.
    """
    yaml.add_representer(literal_unicode, literal_unicode_representer)
    yaml.add_representer(unicode, unicode_representer)
    with open(os.path.expanduser(yaml_path), 'w') as fd:
        return yaml.dump(yaml_data, fd,
                         default_flow_style=default_style)
+
+
def create_keypair(prikey_path, pubkey_path, size=2048):
    """Generate an RSA keypair and write PEM private / OpenSSH public files.

    :param prikey_path: destination for the PEM private key (~ expanded)
    :param pubkey_path: destination for the OpenSSH public key (~ expanded)
    :param size: RSA modulus size in bits (default 2048)
    """
    key = RSA.generate(size)
    # Expand once and chmod the *expanded* path (the original chmod'ed the
    # raw path, which breaks for '~/...' arguments). 0o600 is the portable
    # octal literal: the original '0600' is a SyntaxError on Python 3.
    expanded_prikey = os.path.expanduser(prikey_path)
    with open(expanded_prikey, 'w') as prikey_file:
        # Restrict permissions before key material is written.
        os.chmod(expanded_prikey, 0o600)
        prikey_file.write(key.exportKey('PEM'))
    pubkey = key.publickey()
    with open(os.path.expanduser(pubkey_path), 'w') as pubkey_file:
        pubkey_file.write(pubkey.exportKey('OpenSSH'))
diff --git a/lib/auto/util/yaml_type.py b/lib/auto/util/yaml_type.py
new file mode 100644
index 0000000..352fc7d
--- /dev/null
+++ b/lib/auto/util/yaml_type.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
# Marker subclasses of (Python 2) unicode: values wrapped in these types
# select a YAML scalar style at dump time — folded (>) vs literal (|)
# blocks — via the representers registered in util.write_yaml.
# NOTE(review): `unicode` does not exist on Python 3 — confirm the target
# interpreter before porting.
class folded_unicode(unicode): pass
class literal_unicode(unicode): pass
diff --git a/prepare.sh b/prepare.sh
new file mode 100755
index 0000000..75e1108
--- /dev/null
+++ b/prepare.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Run this script to setup Auto virtualenv and install Auto modules into
+# it.
+# Usage:
+# bash prepare.sh
+########################################################################
+
# Abort on the first failed step so a half-built venv is not silently used.
set -e

pip install virtualenv
virtualenv venv
# shellcheck disable=SC1091
source ./venv/bin/activate
pip install setuptools
AUTO_DIR=$(pwd)
# Persist AUTO_DIR into the venv activate script so tools run inside the
# venv (e.g. onap_os_builder) can locate the project root. Guard against
# appending a duplicate export on repeated runs.
if ! grep -q '^export AUTO_DIR=' venv/bin/activate; then
cat << EOF >> venv/bin/activate
export AUTO_DIR=$AUTO_DIR
EOF
fi
python setup.py install
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..8ef8db6
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,8 @@
+GitPython
+pycrypto
+keystoneauth1>=3.1.0
+python-keystoneclient>=3.8.0
+python-glanceclient>=2.8.0
+python-neutronclient>=6.3.0
+python-novaclient>=9.0.0
+python-heatclient>=1.6.1
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..59a3c91
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+import os
+from setuptools import setup, find_packages
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+
# Read install requirements from requirements.txt next to this file,
# skipping blank and '#' comment lines so install_requires only receives
# real requirement specifiers (the original kept whitespace-only lines).
requirement_path = os.path.join(
    os.path.dirname(__file__), 'requirements.txt')
with open(requirement_path, 'r') as fd:
    requirements = [line.strip() for line in fd
                    if line.strip() and not line.strip().startswith('#')]

setup(
    name="auto",
    version='1.0.0',
    package_dir={'': 'lib'},
    packages=find_packages('lib'),
    include_package_data=True,
    install_requires=requirements
)
diff --git a/setup/onap_on_openstack/__init__.py b/setup/onap_on_openstack/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/setup/onap_on_openstack/__init__.py
diff --git a/setup/onap_on_openstack/config.yml b/setup/onap_on_openstack/config.yml
new file mode 100644
index 0000000..88c5db1
--- /dev/null
+++ b/setup/onap_on_openstack/config.yml
@@ -0,0 +1,64 @@
+---
+
+onap_stack_name: onap
+
+onap_demo_git:
+ repo: https://gerrit.onap.org/r/demo
+ branch: amsterdam
+ heat_template: heat/ONAP/onap_openstack.yaml
+ heat_env: heat/ONAP/onap_openstack.env
+
+onap_vm_images:
+ ubuntu_1404_image:
+ name: Ubuntu_14.04_trusty
+ url: https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ ubuntu_1604_image:
+ name: Ubuntu_16.04_xenial
+ url: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ dcae_centos_7_image:
+ name: Centos_7
+ url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1711.qcow2
+
+onap_secgroup_rules:
+ - protocol: tcp
+ direction: ingress
+ port_range_min: 1
+ port_range_max: 65535
+
+ - protocol: icmp
+ direction: ingress
+ port_range_min:
+ port_range_max:
+
+onap_quota:
+ instances: 100
+ cores: 100
+ ram: 204800
+
+onap_keypair:
+ name: onap_key
+ pubkey_path: ~/.ssh/id_rsa.pub
+
+onap_user_config:
+ public_net_name: ext-net
+ flavor_small: m1.small
+ flavor_medium: m1.medium
+ flavor_large: m1.large
+ flavor_xlarge: m1.xlarge
+ flavor_xxlarge: m1.xlarge
+ openstack_tenant_name: admin
+ openstack_username: admin
+ openstack_api_key: 49ef27251b38c5124378010e7be8758eb
+ horizon_url: https://192.168.22.222:80
+ keystone_url: https://192.168.22.222:5000
+ dns_list: ["8.8.8.8"]
+ external_dns: 8.8.8.8
+ dns_forwarder: 192.168.22.222
+ dnsaas_config_enabled: true
+ dnsaas_region: RegionOne
+ dnsaas_keystone_url: https://192.168.22.222:5000
+ dnsaas_tenant_name: service
+ dnsaas_username: designate
+ dnsaas_password: 853ff4c5315221ce5a042954eac38ea6692092a33c
+ dcae_keystone_url: https://192.168.22.222:5000
+ dcae_domain: dcaeg2.onap.org
diff --git a/setup/onap_on_openstack/launch_onap.py b/setup/onap_on_openstack/launch_onap.py
new file mode 100644
index 0000000..948adfc
--- /dev/null
+++ b/setup/onap_on_openstack/launch_onap.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""Launch ONAP on OpenStack"""
+
+import argparse
+from onap_os_builder import ONAP_os_builder
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+
def read_cli_args():
    """Parse command-line arguments; --config/-c selects the YAML config
    file (defaults to ./config.yml)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', '-c',
                        dest='config',
                        action='store',
                        default='./config.yml',
                        help='config file')
    return parser.parse_args()
+
+
if __name__ == '__main__':
    # Bring ONAP up on OpenStack end-to-end, in dependency order:
    # clone the demo heat templates, upload VM images, open the default
    # security group, raise compute quotas, register the SSH key, fill in
    # the heat environment, and finally create the Heat stack.
    args = read_cli_args()
    config = args.config
    onap_builder = ONAP_os_builder(config)
    onap_builder.clone_demo_code()
    onap_builder.create_onap_vm_images()
    onap_builder.create_onap_secgroup_rules()
    onap_builder.set_quota()
    onap_builder.create_onap_key()
    onap_builder.set_onap_stack_params()
    onap_builder.create_onap_stack()
diff --git a/setup/onap_on_openstack/onap_os_builder.py b/setup/onap_on_openstack/onap_os_builder.py
new file mode 100644
index 0000000..b6c5608
--- /dev/null
+++ b/setup/onap_on_openstack/onap_os_builder.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""ONAP builder for OpenStack"""
+
+import os
+import sys
+
+import auto.util.openstack_lib as os_lib
+import auto.util.util as util
+from auto.util.yaml_type import literal_unicode
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+
class ONAP_os_builder(object):
    """Prepare the OpenStack environment and launch the ONAP Heat stack."""

    def __init__(self, config_file):
        """Load the YAML config and build OpenStack clients and work paths.

        :param config_file: path to the config (see config.yml)
        :raises RuntimeError: if AUTO_DIR is not set in the environment
        """
        self.config = util.read_yaml(config_file)
        self.stack_name = self.config['onap_stack_name']
        self.demo_git = self.config['onap_demo_git']
        self.vm_images = self.config['onap_vm_images']
        self.secgroup_rules = self.config['onap_secgroup_rules']
        self.quota = self.config['onap_quota']
        self.keypair = self.config['onap_keypair']
        self.user_config = self.config['onap_user_config']

        self.creds = os_lib.get_credentials()
        self.keystone_client = os_lib.get_keystone_client(self.creds)
        self.glance_client = os_lib.get_glance_client(self.creds)
        self.neutron_client = os_lib.get_neutron_client(self.creds)
        self.nova_client = os_lib.get_nova_client(self.creds)
        self.heat_client = os_lib.get_heat_client(self.creds)

        # AUTO_DIR is exported into the venv activate script by prepare.sh.
        # Fail fast with a clear message instead of a TypeError from
        # os.path.join(None, ...).
        self.auto_dir = os.getenv('AUTO_DIR')
        if not self.auto_dir:
            raise RuntimeError("AUTO_DIR is not set; source the Auto venv")
        self.work_dir = os.path.join(self.auto_dir, "work")
        self.demo_repo_dir = os.path.join(self.work_dir, "demo")
        self.heat_template = os.path.join(self.demo_repo_dir,
                                          self.demo_git['heat_template'])
        self.heat_env = os.path.join(self.demo_repo_dir,
                                     self.demo_git['heat_env'])
        self.image_dir = os.path.join(self.work_dir, "images")
        self.keypair_dir = os.path.join(self.work_dir, "keypair")
        util.mkdir(self.work_dir)

    def clone_demo_code(self):
        """Clone the ONAP demo repo (heat templates) into the work dir."""
        util.git_clone(self.demo_git['repo'], self.demo_git['branch'],
                       self.demo_repo_dir)

    def prepare_images(self):
        """Download any missing ONAP VM images into the image dir."""
        util.mkdir(self.image_dir)
        for _, image_info in self.vm_images.items():
            image_path = os.path.join(self.image_dir, image_info['name'])
            util.download(image_info['url'], image_path)

    def create_onap_vm_images(self):
        """Download images and register them in Glance (idempotent)."""
        self.prepare_images()
        for _, image_info in self.vm_images.items():
            image_path = os.path.join(self.image_dir, image_info['name'])
            os_lib.create_image(self.glance_client,
                                image_info['name'],
                                image_path)

    def create_onap_secgroup_rules(self):
        """Add the configured rules to the project's default secgroup."""
        project_name = os_lib.get_project_name(self.creds)
        project_id = os_lib.get_project_id(self.keystone_client, project_name)
        secgroup_id = os_lib.get_security_group_id(self.neutron_client,
                                                   "default", project_id)
        for secgroup_rule in self.secgroup_rules:
            os_lib.create_secgroup_rule(self.neutron_client, secgroup_id,
                                        secgroup_rule['protocol'],
                                        secgroup_rule['direction'],
                                        secgroup_rule['port_range_min'],
                                        secgroup_rule['port_range_max'])

    def set_quota(self):
        """Raise the project's compute quotas to the configured values."""
        project_name = os_lib.get_project_name(self.creds)
        project_id = os_lib.get_project_id(self.keystone_client, project_name)
        os_lib.update_compute_quota(self.nova_client, project_id, self.quota)

    def create_onap_key(self):
        """Register the configured public key as a Nova keypair."""
        os_lib.create_keypair(self.nova_client, self.keypair['name'],
                              self.keypair['pubkey_path'])

    def set_onap_stack_params(self):
        """Merge user config, image names, keys and ids into the heat env."""
        stack_config = util.read_yaml(self.heat_env)['parameters']

        user_config = self.user_config
        user_config.update({'ubuntu_1404_image':
                            self.vm_images['ubuntu_1404_image']['name']})
        user_config.update({'ubuntu_1604_image':
                            self.vm_images['ubuntu_1604_image']['name']})
        user_config.update({'dcae_centos_7_image':
                            self.vm_images['dcae_centos_7_image']['name']})

        pubkey_data = util.read_file(self.keypair['pubkey_path']).strip('\n')
        user_config.update({'key_name': self.keypair['name']})
        user_config.update({'pub_key': literal_unicode(pubkey_data)})

        util.mkdir(self.keypair_dir)
        prikey_path = os.path.join(self.keypair_dir, 'private.key')
        pubkey_path = os.path.join(self.keypair_dir, 'public.key')
        if not os.path.isfile(prikey_path) or not os.path.isfile(pubkey_path):
            util.create_keypair(prikey_path, pubkey_path)

        dcae_prikey_data = util.read_file(prikey_path).strip('\n')
        dcae_pubkey_data = util.read_file(pubkey_path).strip('\n')
        # Fixed: the original swapped these two, sending the private key
        # as 'dcae_public_key' and the public key as 'dcae_private_key'.
        user_config.update({'dcae_public_key':
                            literal_unicode(dcae_pubkey_data)})
        user_config.update({'dcae_private_key':
                            literal_unicode(dcae_prikey_data)})

        public_net_id = os_lib.get_network_id(
            self.neutron_client,
            user_config['public_net_name']
        )
        user_config.update({'public_net_id': public_net_id})
        project_id = os_lib.get_project_id(
            self.keystone_client,
            user_config['openstack_tenant_name']
        )
        user_config.update({'openstack_tenant_id': project_id})

        for key, value in user_config.items():
            stack_config[key] = value
        heat_env_data = {'parameters': stack_config}
        util.write_yaml(heat_env_data, self.heat_env)

    def create_onap_stack(self):
        """Create the ONAP Heat stack from the template and env file."""
        stack_args = {}
        stack_args['stack_name'] = self.stack_name
        stack_args['template'] = util.read_file(self.heat_template)
        stack_args['parameters'] = util.read_yaml(self.heat_env)['parameters']
        self.heat_client.stacks.create(**stack_args)
+
diff --git a/vcpe_spinup.sh b/vcpe_spinup.sh
new file mode 100644
index 0000000..0dd8a46
--- /dev/null
+++ b/vcpe_spinup.sh
@@ -0,0 +1,99 @@
#!/bin/sh
# Spin up a new vCPE instance through the VNF manager REST API, wait until
# it reports Active, then configure its WAN interface and default route.
#
# Protocol: each helper prints the HTTP status code of its REST call on
# stdout; callers capture it with $(...). Progress messages go to stderr.
# (The original's `result = cmd` / `return result` forms are not valid
# shell: assignments must have no spaces and functions cannot return
# strings; single-quoted JSON also never expanded the $1..$3 arguments.)

URLSPINUP='http://127.0.0.1:18003/vnf/v1'
URLSTATUS='http://127.0.0.1:18002/resmgr/v1/dev?dev_id='

URLINTF='http://127.0.0.1:18002/resmgr/v1/dev/if'
URLINTFCONF='http://127.0.0.1:18002/ifconfig/v1'
URLROUTE='http://127.0.0.1:18002/rtconfig/v1'

AUTH='admin:admin'

dev_id="2188032VRE2018011814131903B81436"
vnf_name="vcpe_20180118150535"
esn="2188032VRE2018011814131903B81436"

# Request creation of the vCPE ($1 dev_id, $2 vnf_name, $3 esn).
spinup() {
    echo 'trying to spin up a new vcpe instance' >&2
    body=$(printf '{ "dev_id": "%s", "vnf_name": "%s", "ctrler_id": "HW_AC_CAMPUS_CQ2", "vnfm_id": "HW_VNFM_CQ", "dev_vendor": "HUAWEI", "dev_model": "VNFM", "vnf_type": "VCPE", "vnf_esn": "%s", "netconf_cfg": { "ipv4": "172.17.11.122", "ipv4_gw": "172.17.11.1"}, "status": "Active" }' "$1" "$2" "$3")
    curl -s -o /dev/null -w '%{http_code}' -H "Content-type: application/json" \
        -X POST -u "$AUTH" -d "$body" "$URLSPINUP"
}

# Print the device's status string ($1 dev_id).
checkstatus() {
    curl -s -H "Content-type: application/json" -X GET -u "$AUTH" \
        "${URLSTATUS}$1" | jq -r '.status'
}

# Create then address the WAN interface
# ($1 dev_id, $2 if_name, $3 ip, $4 gateway).
cfgwaninterface() {
    body=$(printf '{"dev_id": "%s", "if_name": "%s", "if_lable": "WAN", "access_ipv4": "192.168.40.30"}' "$1" "$2")
    rc=$(curl -s -o /dev/null -w '%{http_code}' -H "Content-type: application/json" \
        -X POST -u "$AUTH" -d "$body" "$URLINTF")
    if [ "$rc" -eq 200 ]; then
        body=$(printf '{"dev_id": "%s", "if_name": "%s", "ip_cfg": {"ip": "%s", "gateway": "%s"} }' "$1" "$2" "$3" "$4")
        rc=$(curl -s -o /dev/null -w '%{http_code}' -H "Content-type: application/json" \
            -X POST -u "$AUTH" -d "$body" "$URLINTFCONF")
    fi
    echo "$rc"
}

# Install the default route via nexthop $2 on device $1.
cfgdefaultroute() {
    body=$(printf '{"dev_id": "%s", "static_rt": {"dst":"0.0.0.0/0", "nexthop": "%s"} }' "$1" "$2")
    curl -s -o /dev/null -w '%{http_code}' -H "Content-type: application/json" \
        -X POST -u "$AUTH" -d "$body" "$URLROUTE"
}

# Configure the WAN interface, then the default route on success
# ($1 dev_id, $2 if_name, $3 ip, $4 gateway).
enablewan() {
    rc=$(cfgwaninterface "$1" "$2" "$3" "$4")
    if [ "$rc" -eq 200 ]; then
        rc=$(cfgdefaultroute "$1" "$4")
    fi
    echo "$rc"
}

result=$(spinup "$dev_id" "$vnf_name" "$esn")

if [ "$result" -eq 200 ]; then

    echo 'vcpe is being spinned up, wait...'

    while true; do
        sleep 30
        status=$(checkstatus "$dev_id")
        if [ "$status" = "Active" ]; then
            echo 'vcpe is active now!'
            break
        fi
    done

    result=$(enablewan "$dev_id" "GigabitEthernet0/0/1" "192.168.40.30" "192.168.40.254")
    if [ "$result" -eq 200 ]; then
        echo 'vcpe is ready for service!'
    fi

elif [ "$result" -gt 300 ]; then
    echo 'error happens!'
else
    echo 'illegal json result!'
fi
diff --git a/vfw_spinup.sh b/vfw_spinup.sh
new file mode 100644
index 0000000..9c9cd82
--- /dev/null
+++ b/vfw_spinup.sh
@@ -0,0 +1,53 @@
#!/bin/sh
# Spin up a new vFW instance through the VNF manager REST API and wait
# until it reports Active.
#
# Protocol: helpers print the HTTP status code of their REST call on
# stdout; callers capture it with $(...). Progress messages go to stderr.
# (Fixes from the original: shell assignments must have no spaces around
# '=', `result = curl`/`return result` are invalid, the single-quoted
# JSON never expanded $1..$3, the typo `sinup`, a missing `fi` inside the
# wait loop, and string status compared with -eq instead of =.)

URLSPINUP='http://127.0.0.1:18003/vnf/v1'
URLSTATUS='http://127.0.0.1:18002/resmgr/v1/dev?dev_id='
AUTH='admin:admin'

dev_id="0488033DDN20180118150535B7F76420"
vnf_name="vfw_20180118150535"
esn="0488033DDN20180118150535B7F76420"

# Request creation of the vFW ($1 dev_id, $2 vnf_name, $3 esn).
spinup() {
    echo 'trying to spin up a new vfw instance' >&2
    body=$(printf '{ "dev_id": "%s", "vnf_name": "%s", "ctrler_id": "HW_AC_CAMPUS_CQ2", "vnfm_id": "HW_VNFM_CQ", "dev_vendor": "HUAWEI", "dev_model": "VNFM", "vnf_type": "VFW", "vnf_esn": "%s", "netconf_cfg": { "ipv4": "192.168.20.129", "mask_bit": 24, "ipv4_gw": "192.168.20.254"}, "wan_cfg": {"ipv4": "192.168.40.40", "mask_bit": 24, "ipv4_gw": "192.168.40.254"}, "status": "Active" }' "$1" "$2" "$3")
    curl -s -o /dev/null -w '%{http_code}' -H "Content-type: application/json" \
        -X POST -u "$AUTH" -d "$body" "$URLSPINUP"
}

# Print the device's status string ($1 dev_id).
checkstatus() {
    curl -s -H "Content-type: application/json" -X GET -u "$AUTH" \
        "${URLSTATUS}$1" | jq -r '.status'
}

result=$(spinup "$dev_id" "$vnf_name" "$esn")

if [ "$result" -eq 200 ]; then

    echo 'vfw is being spinned up, wait...'

    while true; do
        sleep 30
        status=$(checkstatus "$dev_id")
        if [ "$status" = "Active" ]; then
            echo 'vfw is active now!'
            break
        fi
    done

elif [ "$result" -gt 300 ]; then
    echo 'error happens!'
else
    echo 'illegal json result!'
fi
+
+
+
diff --git a/vpn_subscribe.sh b/vpn_subscribe.sh
new file mode 100644
index 0000000..fc45454
--- /dev/null
+++ b/vpn_subscribe.sh
@@ -0,0 +1,220 @@
#!/bin/sh
# Subscribe an L3VPN between a uCPE and an IMG for tenant "opnfv":
# create the tenant, enable both CPEs, wait until both are Active,
# configure their LAN interfaces, then activate the site-to-site VPN.
#
# Protocol: helpers print the HTTP status code of their REST call on
# stdout; callers capture it with $(...). Progress messages go to stderr.
# (Fixes from the original: invalid `VAR = value` assignments,
# `result = curl`/`return result` forms, single-quoted JSON that never
# expanded positional args, `$10`/`$11` parsed as `${1}0`, an unterminated
# `"$6` JSON string, the `$cpe_moel` typo, `-u $URLCPE` passing the URL as
# credentials while omitting $AUTH, several `if` chains missing `fi`, and
# references to undefined $ip1/$ip2/$cpe1result/$result.)

L3VPN=3

AUTH='admin:admin'
URLTENANT='http://127.0.0.1:8091/v1/tenant'
URLCPE='http://127.0.0.1:8091/v1/cpe'
URLSTATUS='http://127.0.0.1:18002/resmgr/v1/dev?dev_id='
URLINTERFACE='http://127.0.0.1:8091/v1/cpe/interface'
URLSERVICE='http://127.0.0.1:8091/v1/vpn'

tenantid='opnfv'
tenantname='opnfv'

esn1='21500102003GH5000971'
interface1='GigabitEthernet0/0/3'
vlan1=3006
subnet1='172.168.2.0'
mask1=24            # was mis-assigned to mask2 in the original
gateway1='10.10.2.2'

esn2='2102114469P0H3000011'
interface2='10GE6/0/16'
vlan2=3000
subnet2='172.168.1.0'
mask2=24
gateway2='10.10.1.2'

# Create a tenant ($1 id, $2 name); print HTTP status.
createtenant() {
    echo "tenant $1 is being created!" >&2
    body=$(printf '{ "tenant_id": "%s", "tenant_name": "%s", "cert_type": "A", "cert_num": "000000000000000000001"}' "$1" "$2")
    curl -s -o /dev/null -w '%{http_code}' -H 'Content-type:application/json' \
        -X POST -d "$body" -u "$AUTH" "$URLTENANT"
}

# Enable a CPE ($1 esn, $2 tenant id, $3 type: "IMG" or "UCPE");
# print HTTP status.
enablecpe() {
    cpe_model="4096"
    if [ "$3" = "IMG" ]; then
        cpe_model="4098"
    fi
    echo "cpe $1 is being activated!" >&2
    body=$(printf '{ "cpe_vendor": "HUAWEI", "tenant_id": "%s", "ctrler_id": "HW_AC_CAMPUS_CQ1", "access_type": 0, "cpe_model": "%s", "cpe_esn": "%s" }' "$2" "$cpe_model" "$1")
    curl -s -o /dev/null -w '%{http_code}' -H 'Content-type:application/json' \
        -X POST -d "$body" -u "$AUTH" "$URLCPE"
}

# Print the device's status string ($1 esn).
checkstatus() {
    curl -s -H "Content-type: application/json" -X GET -u "$AUTH" \
        "${URLSTATUS}$1" | jq -r '.status'
}

# Configure a LAN interface ($1 esn, $2 if_name, $3 vlan, $4 ip);
# print HTTP status.
cfglaninterface() {
    echo "cpe $1 interface $2 vlan $3 is being configured!" >&2
    body=$(printf '{ "cpe_esn": "%s", "interfaces": [ { "if_name": "%s", "if_vlan": "%s", "if_ip": "%s", "if_mask": "24"}] }' "$1" "$2" "$3" "$4")
    curl -s -o /dev/null -w '%{http_code}' -H 'Content-type:application/json' \
        -X POST -d "$body" -u "$AUTH" "$URLINTERFACE"
}

# Activate a site-to-site L3VPN; print HTTP status.
# ($1 tenant, $2/$7 local/remote esn, $3/$8 port, $4/$9 vlan,
#  $5/${10} subnet, $6/${11} gateway)
enablesite2site() {
    echo "site2site between cpe $2 and cpe $7 is being activated for tenant $1!" >&2
    body=$(cat <<EOF
{
  "tenant_id": "$1",
  "bandwidth": 51200,
  "order_id": "20180116-16",
  "operation": 1,
  "order_name": "20180116-16",
  "internet_cfg": null,
  "vas_cfg": null,
  "vpn_config": [
    {
      "tenant_id": "$1",
      "vpn_id": 1,
      "vpn_type": $L3VPN,
      "local_device": "$2",
      "dl_bw": 1000,
      "ul_bw": 1000,
      "route_policy": false,
      "qos_grade": null,
      "local_type": 0,
      "local_access": {
        "web_enable": 1,
        "dhcp_server": 1,
        "portvlan_list": [
          {
            "port": "$3",
            "vlan": $4
          }
        ],
        "subnet_list": [
          {
            "ipv4": "$5",
            "mask_bit": "24",
            "gateway": "$6"
          }
        ]
      },
      "remote_device": "$7",
      "remote_type": 0,
      "remote_access": {
        "dhcp_server": 1,
        "web_enable": 1,
        "portvlan_list": [
          {
            "port": "$8",
            "vlan": $9
          }
        ],
        "subnet_list": [
          {
            "ipv4": "${10}",
            "mask_bit": 24,
            "gateway": "${11}"
          }
        ]
      }
    }
  ]
}
EOF
)
    curl -s -o /dev/null -w '%{http_code}' -H 'Content-type:application/json' \
        -X POST -d "$body" -u "$AUTH" "$URLSERVICE"
}

tenantresult=$(createtenant "$tenantid" "$tenantname")
if [ "$tenantresult" -eq 201 ]; then

    echo 'tenant opnfv has been successfully created!'

    ucperesult=$(enablecpe "$esn1" "$tenantid" "UCPE")
    if [ "$ucperesult" -eq 201 ]; then
        echo "cpe $esn1 has been successfully enabled!"
    elif [ "$ucperesult" -eq 404 ]; then
        echo "tenant $tenantid not exits!"
    elif [ "$ucperesult" -eq 409 ]; then
        echo "cpe $esn1 already exists!"
    else
        echo 'illegal result!'
    fi

    imgresult=$(enablecpe "$esn2" "$tenantid" "IMG")
    if [ "$imgresult" -eq 201 ]; then
        echo "cpe $esn2 has been successfully enabled!"
    elif [ "$imgresult" -eq 404 ]; then
        echo "tenant $tenantid not exits!"
    elif [ "$imgresult" -eq 409 ]; then
        echo "cpe $esn2 already exists!"
    else
        echo 'illegal result!'
    fi

    while true; do
        sleep 30
        ucpestatus=$(checkstatus "$esn1")
        imgstatus=$(checkstatus "$esn2")
        if [ "$ucpestatus" = "Active" ] && [ "$imgstatus" = "Active" ]; then
            echo 'ucpe and img are both ready for service!'
            break
        fi
    done

    # NOTE(review): the original passed undefined $ip1/$ip2 here; the
    # subnet addresses are the closest defined values — confirm intent.
    ucpeinterfaceresult=$(cfglaninterface "$esn1" "$interface1" "$vlan1" "$subnet1")
    if [ "$ucpeinterfaceresult" -eq 200 ]; then
        echo "cpe $esn1 interface $interface1 has been successfully configured!"
    elif [ "$ucpeinterfaceresult" -eq 404 ]; then
        echo "cpe $esn1 not exits!"
    else
        echo 'illegal result!'
    fi

    imginterfaceresult=$(cfglaninterface "$esn2" "$interface2" "$vlan2" "$subnet2")
    if [ "$imginterfaceresult" -eq 200 ]; then
        echo "cpe $esn2 interface $interface2 has been successfully configured!"
    elif [ "$imginterfaceresult" -eq 404 ]; then
        echo "cpe $esn1 not exits!"
    else
        echo 'illegal result!'
    fi

    serviceresult=$(enablesite2site "$tenantid" "$esn1" "$interface1" "$vlan1" "$subnet1" "$gateway1" "$esn2" "$interface2" "$vlan2" "$subnet2" "$gateway2")
    if [ "$serviceresult" -eq 201 ]; then
        echo "l3vpn has been successfully enabled between cpe $esn1 and cpe $esn2!"
    elif [ "$serviceresult" -eq 404 ]; then
        echo 'tenant or cpe not exits!'
    elif [ "$serviceresult" -eq 409 ]; then
        echo 'l3vpn already enabled!'
    elif [ "$serviceresult" -eq 500 ]; then
        echo "$serviceresult"
    else
        echo 'illegal result!'
    fi

elif [ "$tenantresult" -eq 409 ]; then
    echo 'tenant already exists!'
else
    echo 'illegal result!'
fi
+
+
+
+
+
+
+
+
+
diff --git a/vpn_unsubscribe.sh b/vpn_unsubscribe.sh
new file mode 100644
index 0000000..905a2a3
--- /dev/null
+++ b/vpn_unsubscribe.sh
@@ -0,0 +1,220 @@
#!/bin/sh
# Unsubscribe the L3VPN between the uCPE and the IMG for tenant "opnfv"
# (same flow as vpn_subscribe.sh but with "operation": 0 in the VPN
# payload, i.e. deactivation).
#
# Protocol: helpers print the HTTP status code of their REST call on
# stdout; callers capture it with $(...). Progress messages go to stderr.
# (Fixes from the original: invalid `VAR = value` assignments,
# `result = curl`/`return result` forms, single-quoted JSON that never
# expanded positional args, `$10`/`$11` parsed as `${1}0`, an unterminated
# `"$6` JSON string, the `$cpe_moel` typo, `-u $URLCPE` passing the URL as
# credentials while omitting $AUTH, several `if` chains missing `fi`, and
# references to undefined $ip1/$ip2/$cpe1result/$result.)

L3VPN=3

AUTH='admin:admin'
URLTENANT='http://127.0.0.1:8091/v1/tenant'
URLCPE='http://127.0.0.1:8091/v1/cpe'
URLSTATUS='http://127.0.0.1:18002/resmgr/v1/dev?dev_id='
URLINTERFACE='http://127.0.0.1:8091/v1/cpe/interface'
URLSERVICE='http://127.0.0.1:8091/v1/vpn'

tenantid='opnfv'
tenantname='opnfv'

esn1='21500102003GH5000971'
interface1='GigabitEthernet0/0/3'
vlan1=3006
subnet1='172.168.2.0'
mask1=24            # was mis-assigned to mask2 in the original
gateway1='10.10.2.2'

esn2='2102114469P0H3000011'
interface2='10GE6/0/16'
vlan2=3000
subnet2='172.168.1.0'
mask2=24
gateway2='10.10.1.2'

# Create a tenant ($1 id, $2 name); print HTTP status.
createtenant() {
    echo "tenant $1 is being created!" >&2
    body=$(printf '{ "tenant_id": "%s", "tenant_name": "%s", "cert_type": "A", "cert_num": "000000000000000000001"}' "$1" "$2")
    curl -s -o /dev/null -w '%{http_code}' -H 'Content-type:application/json' \
        -X POST -d "$body" -u "$AUTH" "$URLTENANT"
}

# Enable a CPE ($1 esn, $2 tenant id, $3 type: "IMG" or "UCPE");
# print HTTP status.
enablecpe() {
    cpe_model="4096"
    if [ "$3" = "IMG" ]; then
        cpe_model="4098"
    fi
    echo "cpe $1 is being activated!" >&2
    body=$(printf '{ "cpe_vendor": "HUAWEI", "tenant_id": "%s", "ctrler_id": "HW_AC_CAMPUS_CQ1", "access_type": 0, "cpe_model": "%s", "cpe_esn": "%s" }' "$2" "$cpe_model" "$1")
    curl -s -o /dev/null -w '%{http_code}' -H 'Content-type:application/json' \
        -X POST -d "$body" -u "$AUTH" "$URLCPE"
}

# Print the device's status string ($1 esn).
checkstatus() {
    curl -s -H "Content-type: application/json" -X GET -u "$AUTH" \
        "${URLSTATUS}$1" | jq -r '.status'
}

# Configure a LAN interface ($1 esn, $2 if_name, $3 vlan, $4 ip);
# print HTTP status.
cfglaninterface() {
    echo "cpe $1 interface $2 vlan $3 is being configured!" >&2
    body=$(printf '{ "cpe_esn": "%s", "interfaces": [ { "if_name": "%s", "if_vlan": "%s", "if_ip": "%s", "if_mask": "24"}] }' "$1" "$2" "$3" "$4")
    curl -s -o /dev/null -w '%{http_code}' -H 'Content-type:application/json' \
        -X POST -d "$body" -u "$AUTH" "$URLINTERFACE"
}

# Deactivate the site-to-site L3VPN ("operation": 0); print HTTP status.
# ($1 tenant, $2/$7 local/remote esn, $3/$8 port, $4/$9 vlan,
#  $5/${10} subnet, $6/${11} gateway)
enablesite2site() {
    echo "site2site between cpe $2 and cpe $7 is being deactivated for tenant $1!" >&2
    body=$(cat <<EOF
{
  "tenant_id": "$1",
  "bandwidth": 51200,
  "order_id": "20180116-16",
  "operation": 0,
  "order_name": "20180116-16",
  "internet_cfg": null,
  "vas_cfg": null,
  "vpn_config": [
    {
      "tenant_id": "$1",
      "vpn_id": 1,
      "vpn_type": $L3VPN,
      "local_device": "$2",
      "dl_bw": 1000,
      "ul_bw": 1000,
      "route_policy": false,
      "qos_grade": null,
      "local_type": 0,
      "local_access": {
        "web_enable": 1,
        "dhcp_server": 1,
        "portvlan_list": [
          {
            "port": "$3",
            "vlan": $4
          }
        ],
        "subnet_list": [
          {
            "ipv4": "$5",
            "mask_bit": "24",
            "gateway": "$6"
          }
        ]
      },
      "remote_device": "$7",
      "remote_type": 0,
      "remote_access": {
        "dhcp_server": 1,
        "web_enable": 1,
        "portvlan_list": [
          {
            "port": "$8",
            "vlan": $9
          }
        ],
        "subnet_list": [
          {
            "ipv4": "${10}",
            "mask_bit": 24,
            "gateway": "${11}"
          }
        ]
      }
    }
  ]
}
EOF
)
    curl -s -o /dev/null -w '%{http_code}' -H 'Content-type:application/json' \
        -X POST -d "$body" -u "$AUTH" "$URLSERVICE"
}

tenantresult=$(createtenant "$tenantid" "$tenantname")
if [ "$tenantresult" -eq 201 ]; then

    echo 'tenant opnfv has been successfully created!'

    ucperesult=$(enablecpe "$esn1" "$tenantid" "UCPE")
    if [ "$ucperesult" -eq 201 ]; then
        echo "cpe $esn1 has been successfully enabled!"
    elif [ "$ucperesult" -eq 404 ]; then
        echo "tenant $tenantid not exits!"
    elif [ "$ucperesult" -eq 409 ]; then
        echo "cpe $esn1 already exists!"
    else
        echo 'illegal result!'
    fi

    imgresult=$(enablecpe "$esn2" "$tenantid" "IMG")
    if [ "$imgresult" -eq 201 ]; then
        echo "cpe $esn2 has been successfully enabled!"
    elif [ "$imgresult" -eq 404 ]; then
        echo "tenant $tenantid not exits!"
    elif [ "$imgresult" -eq 409 ]; then
        echo "cpe $esn2 already exists!"
    else
        echo 'illegal result!'
    fi

    while true; do
        sleep 30
        ucpestatus=$(checkstatus "$esn1")
        imgstatus=$(checkstatus "$esn2")
        if [ "$ucpestatus" = "Active" ] && [ "$imgstatus" = "Active" ]; then
            echo 'ucpe and img are both ready for service!'
            break
        fi
    done

    # NOTE(review): the original passed undefined $ip1/$ip2 here; the
    # subnet addresses are the closest defined values — confirm intent.
    ucpeinterfaceresult=$(cfglaninterface "$esn1" "$interface1" "$vlan1" "$subnet1")
    if [ "$ucpeinterfaceresult" -eq 200 ]; then
        echo "cpe $esn1 interface $interface1 has been successfully configured!"
    elif [ "$ucpeinterfaceresult" -eq 404 ]; then
        echo "cpe $esn1 not exits!"
    else
        echo 'illegal result!'
    fi

    imginterfaceresult=$(cfglaninterface "$esn2" "$interface2" "$vlan2" "$subnet2")
    if [ "$imginterfaceresult" -eq 200 ]; then
        echo "cpe $esn2 interface $interface2 has been successfully configured!"
    elif [ "$imginterfaceresult" -eq 404 ]; then
        echo "cpe $esn1 not exits!"
    else
        echo 'illegal result!'
    fi

    serviceresult=$(enablesite2site "$tenantid" "$esn1" "$interface1" "$vlan1" "$subnet1" "$gateway1" "$esn2" "$interface2" "$vlan2" "$subnet2" "$gateway2")
    if [ "$serviceresult" -eq 201 ]; then
        echo "l3vpn has been successfully enabled between cpe $esn1 and cpe $esn2!"
    elif [ "$serviceresult" -eq 404 ]; then
        echo 'tenant or cpe not exits!'
    elif [ "$serviceresult" -eq 409 ]; then
        echo 'l3vpn already enabled!'
    elif [ "$serviceresult" -eq 500 ]; then
        echo "$serviceresult"
    else
        echo 'illegal result!'
    fi

elif [ "$tenantresult" -eq 409 ]; then
    echo 'tenant already exists!'
else
    echo 'illegal result!'
fi
+
+
+
+
+
+
+
+
+