-rw-r--r--   INFO.yaml                                            60
-rw-r--r--   docs/release/release-notes/Auto-release-notes.rst    42
-rw-r--r--   docs/release/release-notes/index.rst                  5
-rw-r--r--   docs/release/userguide/UC01-feature.userguide.rst    14
-rw-r--r--   docs/release/userguide/UC02-feature.userguide.rst    58
-rw-r--r--   docs/release/userguide/UC03-feature.userguide.rst    29
-rw-r--r--   docs/release/userguide/index.rst                      6
-rw-r--r--   lib/auto/testcase/resiliency/AutoResilIftCloud.py    46
-rw-r--r--   lib/auto/testcase/resiliency/AutoResilItfCloud.py   227
-rw-r--r--   lib/auto/testcase/resiliency/AutoResilMain.py         7
-rw-r--r--   lib/auto/testcase/resiliency/AutoResilMgTestDef.py  276
-rw-r--r--   lib/auto/testcase/resiliency/clouds.yaml             91
12 files changed, 706 insertions(+), 155 deletions(-)
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 0000000..b3f0c09
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,60 @@
+---
+project: 'ONAP-Automated OPNFV (Auto)'
+project_creation_date: 'August 15, 2017'
+project_category: ''
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_auto_ptl
+ name: 'Tina Tsou'
+ email: 'tina.tsou@arm.com'
+ id: 'tinatsou'
+ company: 'arm.com'
+ timezone: 'Unknown'
+primary_contact: *opnfv_auto_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/AUTO'
+ key: 'AUTO'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[auto]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-auto'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: # eg: 'https://wiki.opnfv.org/display/'
+ url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: # eg: '16:00 UTC'
+repositories:
+ - 'auto'
+committers:
+ - <<: *opnfv_auto_ptl
+ - name: 'Aric Gardner'
+ email: 'agardner@linuxfoundation.org'
+ company: 'linuxfoundation.org'
+ id: 'agardner'
+ - name: 'Harry Huang'
+ email: 'huangxiangyu5@huawei.com'
+ company: 'huawei.com'
+ id: 'huangxiangyu'
+ - name: 'Madhukesh Sambashivaiah'
+ email: 'madhukeshs@gmail.com'
+ company: 'gmail.com'
+ id: 'madhukeshs'
+ - name: 'Song Zhu'
+ email: 'song.zhu@arm.com'
+ company: 'arm.com'
+ id: 'mail22song'
+ - name: 'Liang Ou'
+ email: 'oul.gd@chinatelecom.cn'
+ company: 'chinatelecom.cn'
+ id: 'ouliang1'
+tsc:
+ # yamllint disable rule:line-length
+ approval: 'http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html'
+ # yamllint enable rule:line-length
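
The new INFO.yaml defines the PTL entry once as a YAML anchor (&opnfv_auto_ptl) and reuses it both as primary_contact (alias) and as the first committer (via the '<<' merge key). A quick check of how this resolves when loaded with PyYAML; this snippet is illustrative only and not part of the commit:

    # Illustrative check: PyYAML's SafeLoader resolves anchors, aliases and the '<<' merge key.
    import yaml

    with open("INFO.yaml") as f:          # assumes the file is in the current directory
        info = yaml.safe_load(f)

    print(info["primary_contact"]["name"])   # 'Tina Tsou' (alias of the PTL anchor)
    print(info["committers"][0]["email"])    # 'tina.tsou@arm.com' (merged via '<<')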
diff --git a/docs/release/release-notes/Auto-release-notes.rst b/docs/release/release-notes/Auto-release-notes.rst
index 18dee5c..84665cd 100644
--- a/docs/release/release-notes/Auto-release-notes.rst
+++ b/docs/release/release-notes/Auto-release-notes.rst
@@ -1,11 +1,7 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-.. contents::
- :depth: 3
- :local:
+.. (c) Open Platform for NFV Project, Inc. and its contributors
==================
@@ -29,14 +25,14 @@ OPNFV is a SDNFV system integration project for open-source components, which so
In particular, OPNFV has yet to integrate higher-level automation features for VNFs and end-to-end Services.
Auto ("ONAP-Automated OPNFV") will focus on ONAP component integration and verification with OPNFV reference platforms/scenarios, through primarily a post-install process in order to avoid impact to OPNFV installer projects. As much as possible, this will use a generic installation/integration process (not specific to any OPNFV installer's technology).
-* `ONAP <https://www.onap.org/`_, a Linux Foundation Project, is an open source software platform that delivers robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of Software-Defined Networks (SDNs).
+
+* `ONAP <https://www.onap.org/>`_ (a Linux Foundation Project) is an open source software platform that delivers robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of Software-Defined Networks (SDNs).
While all of ONAP is in scope, as it proceeds, the project will focus on specific aspects of this integration and verification in each release. Some example topics and work items include:
* How ONAP meets VNFM standards, and interacts with VNFs from different vendors
* How ONAP SDN-C uses OPNFV existing features, e.g. NetReady, in a two-layer controller architecture in which the upper layer (global controller) is replaceable, and the lower layer can use different vendor’s local controller to interact with SDN-C
-* What data collection interface VNF and controllers provide to ONAP DCAE, and (through DCAE), to closed-loop control functions such as Policy
-Tests which verify interoperability of ONAP automation/lifecycle features with specific NFVI and VIM features, as prioritized by the project with technical community and EUAG input. Examples include:
+* What data collection interface VNF and controllers provide to ONAP DCAE, and (through DCAE), to closed-loop control functions such as Policy
+
+Tests which verify interoperability of ONAP automation/lifecycle features with specific NFVI and VIM features, as prioritized by the project with technical community and EUAG input. Examples include:
* Abstraction of networking tech/features e.g. through NetReady/Gluon
* Blueprint-based VNF deployment (HOT, TOSCA, YANG)
@@ -44,8 +40,8 @@ Tests which verify interoperability of ONAP automation/lifecycle features with s
* Policy (through DCAE)
* Telemetry (through VES/DCAE)
-Initial areas of focus for Auto (in orange dotted lines; this scope can be expanded for future releases)
-It is understood that:
+Initial areas of focus for Auto (in orange dotted lines; this scope can be expanded for future releases). It is understood that:
+
* ONAP scope extends beyond the lines drawn below
* ONAP architecture does not necessarily align with the ETSI NFV inspired diagrams this is based upon
@@ -53,12 +49,14 @@ It is understood that:
Testability:
+
* Tests will be developed for use cases within the project scope.
* In future releases, tests will be added to Functest runs for supporting scenarios.
Auto’s goals include the standup and tests for integrated ONAP-Cloud platforms (“Cloud” here being OPNFV “scenarios” or other cloud environments). Thus, the artifacts would be tools to deploy ONAP (leveraging OOM whenever possible (starting with Beijing release of ONAP), and a preference for the containerized version of ONAP), to integrate it with clouds, to onboard and deploy test VNFs, to configure policies and closed-loop controls, and to run use-case defined tests against that integrated environment. OPNFV scenarios would be a possible component in the above.
Auto currently defines three use cases: Edge Cloud, Resiliency Improvements, and Enterprise vCPE. These use cases aim to show:
+
* increased autonomy of Edge Cloud management (automation, catalog-based deployment)
* increased resilience (i.e. fast VNF recovery in case of failure or problem, thanks to closed-loop control)
* enterprise-grade performance of vCPEs (certification during onboarding, then real-time performance assurance with SLAs and HA).
@@ -73,9 +71,11 @@ An ONAP instance (without DCAE) has been installed over Kubernetes on bare metal
Onboarding of 2 VNFs is in progress: a vCPE and a vFW.
Integration with Arm servers has started (exploring binary compatibility):
+
* Openstack is currently installed on a 6-server pod of Arm servers
* a Kubernetes cluster is installed there as well, for another instance of ONAP on Arm servers
* An additional set of 14 Arm servers is in the process of being deployed at UNH, for increased capacity
+* LaaS (Lab as a Service) resources are also used (hpe16, hpe17, hpe19)
Test case implementation for the three use cases has started.
@@ -109,14 +109,14 @@ Module version changes
Document version changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~
- There have been no version changes.
Reason for version
-^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^
Feature additions
-~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~
Initial release, with use case descriptions, release plan, and in-progress test cases and ONAP installations.
@@ -144,9 +144,8 @@ Initial release, with use case descriptions, release plan, and in-progress test
+--------------------------------------+--------------------------------------+
-
Bug corrections
-~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~
**JIRA TICKETS:**
@@ -162,18 +161,19 @@ Bug corrections
+--------------------------------------+--------------------------------------+
Deliverables
-----------------
+============
Software deliverables
-^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^
Initial release: in-progress install scripts and test case implementations.
Documentation deliverables
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^
Initial versions of:
+
* User guide `OPNFV User and Configuration Guide <http://docs.opnfv.org/en/latest/release/userguide.introduction.html>`_
* Release notes (this document)
@@ -183,7 +183,7 @@ Known Limitations, Issues and Workarounds
=========================================
System Limitations
-^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^
* ONAP still to be validated for Arm servers
* DCAE still to be validated for Kubernetes
@@ -191,7 +191,7 @@ System Limitations
Known issues
-^^^^^^^^^^^^^^^
+^^^^^^^^^^^^
None at this point.
@@ -210,7 +210,7 @@ None at this point.
+--------------------------------------+--------------------------------------+
Workarounds
-^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^
None at this point.
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index 37970c1..7a70167 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -2,13 +2,14 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
===============================================
OPNFV Auto (ONAP-Automated OPNFV) Release Notes
===============================================
.. toctree::
- :maxdepth: 1
+ :numbered:
+ :maxdepth: 2
Auto-release-notes.rst
-
diff --git a/docs/release/userguide/UC01-feature.userguide.rst b/docs/release/userguide/UC01-feature.userguide.rst
index fd3a05f..5cf38e1 100644
--- a/docs/release/userguide/UC01-feature.userguide.rst
+++ b/docs/release/userguide/UC01-feature.userguide.rst
@@ -1,11 +1,7 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-.. contents::
- :depth: 3
- :local:
+.. (c) Open Platform for NFV Project, Inc. and its contributors
======================================
@@ -21,17 +17,15 @@ Description
This use case aims at showcasing the benefits of using ONAP for autonomous Edge Cloud management.
-A high level of automation of VNF lifecycle event handling after launch is enabled by ONAP policies
-and closed-loop controls, which take care of most lifecycle events (start, stop, scale up/down/in/out,
-recovery/migration for HA) as well as their monitoring and SLA management.
+A high level of automation of VNF lifecycle event handling after launch is enabled by ONAP policies and closed-loop controls, which take care of most lifecycle events (start, stop, scale up/down/in/out, recovery/migration for HA) as well as their monitoring and SLA management.
-Multiple types of VNFs, for different execution environments, are first approved in the catalog thanks
-to the onboarding process, and then can be deployed and handled by multiple controllers in a systematic way.
+Multiple types of VNFs, for different execution environments, are first approved in the catalog thanks to the onboarding process, and then can be deployed and handled by multiple controllers in a systematic way.
This results in management efficiency (lower control/automation overhead) and high degree of autonomy.
Preconditions:
+
#. hardware environment in which Edge cloud may be deployed
#. an Edge cloud has been deployed and is ready for operation
#. ONAP has been deployed onto a Cloud, and is interfaced (i.e. provisioned for API access) to the Edge cloud
diff --git a/docs/release/userguide/UC02-feature.userguide.rst b/docs/release/userguide/UC02-feature.userguide.rst
index d5b9481..0ecb7de 100644
--- a/docs/release/userguide/UC02-feature.userguide.rst
+++ b/docs/release/userguide/UC02-feature.userguide.rst
@@ -1,31 +1,26 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-.. contents::
- :depth: 3
- :local:
+.. (c) Open Platform for NFV Project, Inc. and its contributors
================================================================
Auto User Guide: Use Case 2 Resiliency Improvements Through ONAP
================================================================
-This document provides the user guide for Fraser release of Auto,
-specifically for Use Case 2: Resiliency Improvements Through ONAP.
+This document provides the user guide for Fraser release of Auto, specifically for Use Case 2: Resiliency Improvements Through ONAP.
Description
===========
-This use case illustrates VNF failure recovery time reduction with ONAP, thanks to its automated monitoring and management.
-It simulates an underlying problem (failure, stress, etc.: any adverse condition in the network that can impact VNFs),
-tracks a VNF, and measures the amount of time it takes for ONAP to restore the VNF functionality.
+This use case illustrates VNF failure recovery time reduction with ONAP, thanks to its automated monitoring and management. It:
+
+* simulates an underlying problem (failure, stress, or any adverse condition in the network that can impact VNFs)
+* tracks a VNF
+* measures the amount of time it takes for ONAP to restore the VNF functionality.
-The benefit for NFV edge service providers is to assess what degree of added VIM+NFVI platform resilience for VNFs is obtained by
-leveraging ONAP closed-loop control, vs. VIM+NFVI self-managed resilience (which may not be aware of the VNF or the corresponding
-end-to-end Service, but only of underlying resources such as VMs and servers).
+The benefit for NFV edge service providers is to assess what degree of added VIM+NFVI platform resilience for VNFs is obtained by leveraging ONAP closed-loop control, vs. VIM+NFVI self-managed resilience (which may not be aware of the VNF or the corresponding end-to-end Service, but only of underlying resources such as VMs and servers).
Preconditions:
@@ -35,12 +30,9 @@ Preconditions:
#. ONAP has been deployed onto a cloud and is interfaced (i.e. provisioned for API access) to the Edge cloud
#. Components of ONAP have been deployed on the Edge cloud as necessary for specific test objectives
-In future releases, Auto Use cases will also include the deployment of ONAP (if not already installed), the deployment
-of test VNFs (pre-existing VNFs in pre-existing ONAP can be used in the test as well), the configuration of ONAP for
-monitoring these VNFs (policies, CLAMP, DCAE), in addition to the test scripts which simulate a problem and measures recovery time.
+In future releases, Auto use cases will also include the deployment of ONAP (if not already installed), the deployment of test VNFs (pre-existing VNFs in pre-existing ONAP can be used in the test as well), and the configuration of ONAP for monitoring these VNFs (policies, CLAMP, DCAE), in addition to the test scripts which simulate a problem and measure recovery time.
-Different types of problems can be simulated, hence the identification of multiple test cases corresponding to this use case,
-as illustrated in this diagram:
+Different types of problems can be simulated, hence the identification of multiple test cases corresponding to this use case, as illustrated in this diagram:
.. image:: auto-UC02-testcases.jpg
@@ -74,20 +66,19 @@ Test execution high-level description
The following two MSCs (Message Sequence Charts) show the actors and high-level interactions.
-The first MSC shows the preparation activities (assuming the hardware, network, cloud, and ONAP have already been installed):
-onboarding and deployment of VNFs (via ONAP portal and modules in sequence: SDC, VID, SO), and ONAP configuration
-(policy framework, closed-loops in CLAMP, activation of DCAE).
+The first MSC shows the preparation activities (assuming the hardware, network, cloud, and ONAP have already been installed): onboarding and deployment of VNFs (via ONAP portal and modules in sequence: SDC, VID, SO), and ONAP configuration (policy framework, closed-loops in CLAMP, activation of DCAE).
.. image:: auto-UC02-preparation.jpg
+
The second MSC illustrates the pattern of all test cases for the Resiliency Improvements:
+
* simulate the chosen problem (a.k.a. a "Challenge") for this test case, for example suspend a VM which may be used by a VNF
* start tracking the target VNF of this test case
* measure the ONAP-orchestrated VNF Recovery Time
* then the test stops simulating the problem (for example: resume the VM that was suspended).
-In parallel, the MSC also shows the sequence of events happening in ONAP, thanks to its configuration to provide Service
-Assurance for the VNF.
+In parallel, the MSC also shows the sequence of events happening in ONAP, thanks to its configuration to provide Service Assurance for the VNF.
.. image:: auto-UC02-pattern.jpg
@@ -95,42 +86,47 @@ Assurance for the VNF.
Test design: data model, implementation modules
===============================================
-The high-level design of classes shows the identification of several entities:
+The high-level design of classes identifies several entities:
+
* Test Case: as identified above, each is a special case of the overall use case (e.g., categorized by challenge type)
* Test Definition: gathers all the information necessary to run a certain test case
* Metric Definition: describes a certain metric that may be measured, in addition to Recovery Time
* Challenge Definition: describe the challenge (problem, failure, stress, ...) simulated by the test case
-* Recipient: entity that can receive commands and send responses, and that is queried by the Test Definition or Challenge Definition
-(a recipient would be typically a management service, with interfaces (CLI or API) for clients to query)
+* Recipient: entity that can receive commands and send responses, and that is queried by the Test Definition or Challenge Definition (a recipient would typically be a management service, with interfaces (CLI or API) for clients to query)
* Resources: with 3 types (VNF, cloud virtual resource such as a VM, physical resource such as a server)
+
Three of these entities have execution-time corresponding classes:
+
* Test Execution, which captures all the relevant data of the execution of a Test Definition
* Challenge Execution, which captures all the relevant data of the execution of a Challenge Definition
* Metric Value, which captures a quantitative measurement of a Metric Definition (with a timestamp)
.. image:: auto-UC02-data1.jpg
+
The following diagram illustrates an implementation-independent design of the attributes of these entities:
+
.. image:: auto-UC02-data2.jpg
+
This next diagram shows the Python classes and attributes, as implemented by this Use Case (for all test cases):
.. image:: auto-UC02-data3.jpg
-Test definition data is stored in serialization files (Python pickles), while test execution data is stored in CSV
-files, for easier post-analysis.
-The module design is straightforward: functions and classes for managing data, for interfacing with recipients,
-for executing tests, and for interacting with the test user (choosing a Test Definition, showing the details
-of a Test Definition, starting the execution).
+Test definition data is stored in serialization files (Python pickles), while test execution data is stored in CSV files, for easier post-analysis.
+
+The module design is straightforward: functions and classes for managing data, for interfacing with recipients, for executing tests, and for interacting with the test user (choosing a Test Definition, showing the details of a Test Definition, starting the execution).
.. image:: auto-UC02-module1.jpg
+
This last diagram shows the test user menu functions:
.. image:: auto-UC02-module2.jpg
+
In future releases of Auto, testing environments such as FuncTest and Yardstick might be leveraged.
Also, anonymized test results could be collected from users willing to share them, and aggregates could be
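
The persistence split described in this user guide (definition objects in Python pickle files, execution records in CSV files) needs only the standard library. The sketch below is illustrative: the CSV helper and its column layout are assumptions, while AutoResilMgTestDef.py's own write_list_bin plays the pickle-writing role in the actual code:

    import csv
    import pickle
    from datetime import datetime

    def write_list_bin(obj_list, file_name):
        # serialize a list of definition objects (Test/Challenge/Metric Definitions) to a pickle file
        with open(file_name, "wb") as f:
            pickle.dump(obj_list, f)

    def read_list_bin(file_name):
        # read the list of definition objects back from the pickle file
        with open(file_name, "rb") as f:
            return pickle.load(f)

    def append_execution_record(csv_name, test_def_id, metric_name, metric_value):
        # append one execution measurement as a CSV row, for easier post-analysis
        with open(csv_name, "a", newline="") as f:
            csv.writer(f).writerow([datetime.now().isoformat(), test_def_id, metric_name, str(metric_value)])

Definition files are regenerated whenever definitions change, while the CSV file simply grows with each test execution.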
diff --git a/docs/release/userguide/UC03-feature.userguide.rst b/docs/release/userguide/UC03-feature.userguide.rst
index 246b2c5..5f28158 100644
--- a/docs/release/userguide/UC03-feature.userguide.rst
+++ b/docs/release/userguide/UC03-feature.userguide.rst
@@ -1,11 +1,7 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. SPDX-License-Identifier CC-BY-4.0
-.. (c) optionally add copywriters name
-
-.. contents::
- :depth: 3
- :local:
+.. (c) Open Platform for NFV Project, Inc. and its contributors
===========================================
@@ -21,27 +17,17 @@ Description
This Use Case shows how ONAP can help ensure that virtual CPEs (including vFW: virtual firewalls) in Edge Cloud are enterprise-grade.
-ONAP operations include a verification process for VNF onboarding (i.e. inclusion in the ONAP catalog),
-with multiple Roles (designer, tester, governor, operator), responsible for approving proposed VNFs
-(as VSPs (Vendor Software Products), and eventually as end-to-end Services).
+ONAP operations include a verification process for VNF onboarding (i.e. inclusion in the ONAP catalog), with multiple Roles (designer, tester, governor, operator), responsible for approving proposed VNFs (as VSPs (Vendor Software Products), and eventually as end-to-end Services).
-This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are only
-chosen from such an approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet
-enterprise-grade requirements. ONAP provides a NBI in addition to a standard portal, thus enabling
-a programmatic deployment of VNFs, still conforming to ONAP processes.
+This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are only chosen from such an approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet enterprise-grade requirements. ONAP provides an NBI in addition to a standard portal, thus enabling a programmatic deployment of VNFs, still conforming to ONAP processes.
-Moreover, ONAP also comprises real-time monitoring (by the DCAE component), which monitors performance for SLAs,
-can adjust allocated resources accordingly (elastic adjustment at VNF level), and can ensure High Availability.
+Moreover, ONAP also comprises real-time monitoring (by the DCAE component), which monitors performance for SLAs, can adjust allocated resources accordingly (elastic adjustment at VNF level), and can ensure High Availability.
-DCAE executes directives coming from policies described in the Policy Framework, and closed-loop controls
-described in the CLAMP component.
+DCAE executes directives coming from policies described in the Policy Framework, and closed-loop controls described in the CLAMP component.
-Finally, this automated approach also reduces costs, since repetitive actions are designed once and executed multiple times,
-as vCPEs are instantiated and decommissioned (frequent events, given the variability of business activity,
-and a Small Business market similar to the Residential market: many contract updates resulting in many vCPE changes).
+Finally, this automated approach also reduces costs, since repetitive actions are designed once and executed multiple times, as vCPEs are instantiated and decommissioned (frequent events, given the variability of business activity, and a Small Business market similar to the Residential market: many contract updates resulting in many vCPE changes).
-NFV edge service providers need to provide site2site, site2dc (Data Center) and site2internet services to tenants
-both efficiently and safely, by deploying such qualified enterprise-grade vCPE.
+NFV edge service providers need to provide site2site, site2dc (Data Center) and site2internet services to tenants both efficiently and safely, by deploying such qualified enterprise-grade vCPE.
Preconditions:
@@ -76,6 +62,7 @@ Details on the test cases corresponding to this use case:
* Spin up a vFW instance: Spin up a vFW instance, by calling NBI of the orchestrator.
* VPN as a Service
+
* Subscribe to a VPN service: Subscribe to a VPN service, by calling NBI of the orchestrator.
* Unsubscribe to a VPN service: Unsubscribe to a VPN service, by calling NBI of the orchestrator.
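
The vCPE test cases above drive the orchestrator through its NBI. A hedged sketch of such a call follows, using a purely hypothetical endpoint and payload (the actual ONAP NBI paths and request schemas are not defined in this document):

    import requests

    NBI_URL = "http://onap-nbi.example.com/nbi/api/v4/serviceOrder"  # hypothetical endpoint

    def spin_up_vfw(customer_id, vfw_service_spec_id):
        # submit a service order asking the orchestrator to instantiate a vFW (sketch only)
        order = {
            "externalId": "auto-uc03-vfw-001",
            "relatedParty": [{"id": customer_id, "role": "ONAPcustomer"}],
            "orderItem": [{"action": "add",
                           "service": {"serviceSpecification": {"id": vfw_service_spec_id}}}],
        }
        resp = requests.post(NBI_URL, json=order, timeout=30)
        resp.raise_for_status()
        return resp.json()  # order identifier, used later to poll for completion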
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
index ba44e0b..7cfbe94 100644
--- a/docs/release/userguide/index.rst
+++ b/docs/release/userguide/index.rst
@@ -2,8 +2,7 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
+.. (c) Open Platform for NFV Project, Inc. and its contributors
============================================
OPNFV Auto (ONAP-Automated OPNFV) User Guide
@@ -16,7 +15,8 @@ OPNFV Auto (ONAP-Automated OPNFV) User Guide
.. by the installer project.
.. toctree::
- :maxdepth: 1
+ :numbered:
+ :maxdepth: 2
UC01-feature.userguide.rst
UC02-feature.userguide.rst
diff --git a/lib/auto/testcase/resiliency/AutoResilIftCloud.py b/lib/auto/testcase/resiliency/AutoResilIftCloud.py
deleted file mode 100644
index 01e948b..0000000
--- a/lib/auto/testcase/resiliency/AutoResilIftCloud.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python3
-
-# ===============LICENSE_START=======================================================
-# Apache-2.0
-# ===================================================================================
-# Copyright (C) 2018 Wipro. All rights reserved.
-# ===================================================================================
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ===============LICENSE_END=========================================================
-
-
-# OPNFV Auto project
-# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
-
-# Use case 02: Resilience Improvements
-# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
-# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
-
-# This module: interfaces with cloud managers (OpenStack, Kubernetes, AWS, ...)
-
-
-######################################################################
-# import statements
-import AutoResilGlobal
-
-
-def f1():
- return 0
-
-# OpenStack HTTP API: https://developer.openstack.org/api-ref/compute/
-#{your_compute_service_url}/servers/{server_id}/action
-#GET
-#http://mycompute.pvt/compute/v2.1/servers/{server_id}/suspend
-#http://mycompute.pvt/compute/v2.1/servers/{server_id}/resume
-
diff --git a/lib/auto/testcase/resiliency/AutoResilItfCloud.py b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
new file mode 100644
index 0000000..69c5327
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with cloud managers (OpenStack, Kubernetes, AWS, ...)
+
+
+######################################################################
+# import statements
+import AutoResilGlobal
+
+# for method 1 and 2
+#import openstack
+
+#for method 3
+from openstack import connection
+
+def os_list_servers(conn):
+ """List OpenStack servers."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/compute.html
+ if conn != None:
+ print("\nList Servers:")
+
+ try:
+ i=1
+ for server in conn.compute.servers():
+ print('Server',str(i),'\n',server,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Servers\n")
+
+
+def os_list_networks(conn):
+ """List OpenStack networks."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/network.html
+ if conn != None:
+ print("\nList Networks:")
+
+ try:
+ i=1
+ for network in conn.network.networks():
+ print('Network',str(i),'\n',network,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Networks\n")
+
+
+def os_list_volumes(conn):
+ """List OpenStack volumes."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/block_storage.html
+ # note: The block_storage member will only be added if the service is detected.
+ if conn != None:
+ print("\nList Volumes:")
+
+ try:
+ i=1
+ for volume in conn.block_storage.volumes():
+ print('Volume',str(i),'\n',volume,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Volumes\n")
+
+
+def os_list_users(conn):
+ """List OpenStack users."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
+ if conn != None:
+ print("\nList Users:")
+
+ try:
+ i=1
+ for user in conn.identity.users():
+ print('User',str(i),'\n',user,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Users\n")
+
+def os_list_projects(conn):
+ """List OpenStack projects."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
+ if conn != None:
+ print("\nList Projects:")
+
+ try:
+ i=1
+ for project in conn.identity.projects():
+ print('Project',str(i),'\n',project,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Projects\n")
+
+
+def os_list_domains(conn):
+ """List OpenStack domains."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
+ if conn != None:
+ print("\nList Domains:")
+
+ try:
+ i=1
+ for domain in conn.identity.domains():
+ print('Domain',str(i),'\n',domain,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Domains\n")
+
+
+def gdtest_openstack():
+    """Try out OpenStack connection methods and list resources (servers, networks, volumes, ...)."""
+ # Method 1: assume there is a clouds.yaml file in PATH, starting path search with local directory
+ #conn = openstack.connect(cloud='armopenstack', region_name='RegionOne')
+ #conn = openstack.connect(cloud='hpe16openstack', region_name='RegionOne')
+ # getting error: AttributeError: module 'openstack' has no attribute 'connect'
+
+ # Method 2: pass arguments directly, all as strings
+ # see details at https://docs.openstack.org/python-openstacksdk/latest/user/connection.html
+ # conn = openstack.connect(
+ # auth_url='https://10.10.50.103:5000/v2.0',
+ # project_name='admin',
+ # username='admin',
+ # password='opnfv_secret',
+ # region_name='RegionOne',
+ # )
+ # conn = openstack.connect(
+ # auth_url='http://10.16.0.101:5000/v2.0',
+ # project_name='admin',
+ # username='admin',
+ # password='opnfv_secret',
+ # region_name='RegionOne',
+ # )
+ # getting error: AttributeError: module 'openstack' has no attribute 'connect'
+
+ # Method 3: create Connection object directly
+ auth_args = {
+ #'auth_url': 'https://10.10.50.103:5000/v2.0', # Arm
+ #'auth_url': 'http://10.16.0.101:5000/v2.0', # hpe16, Euphrates
+ 'auth_url': 'http://10.16.0.107:5000/v3', # hpe16, Fraser
+ 'project_name': 'admin',
+ 'username': 'admin',
+ 'password': 'opnfv_secret',
+ 'region_name': 'RegionOne',
+ 'domain': 'Default'}
+ conn = connection.Connection(**auth_args)
+
+ #conn = connection.Connection(
+ #auth_url='http://10.16.0.107:5000/v3',
+ #project_name='admin',
+ #username='admin',
+ #password='opnfv_secret')
+
+
+ os_list_servers(conn)
+ os_list_networks(conn)
+ os_list_volumes(conn)
+ os_list_users(conn)
+ os_list_projects(conn)
+ os_list_domains(conn)
+
+
+ # get_server(server): Get a single Server
+ # Parameters: server – The value can be the ID of a server or a Server instance.
+ # conn.compute.get_server(server)
+
+ # suspend_server(server): Suspends a server and changes its status to SUSPENDED.
+ # Parameters: server – Either the ID of a server or a Server instance.
+ # conn.compute.suspend_server(server)
+
+ # resume_server(server): Resumes a suspended server and changes its status to ACTIVE.
+ # Parameters: server – Either the ID of a server or a Server instance.
+ # conn.compute.resume_server(server)
+
+
+def main():
+
+ print("\nTest Auto Cloud Interface")
+
+ gdtest_openstack()
+
+ print("Ciao\n")
+
+if __name__ == "__main__":
+ main()
+
+
+# OpenStack HTTP API: https://developer.openstack.org/api-ref/compute/
+#{your_compute_service_url}/servers/{server_id}/action
+#GET
+#http://mycompute.pvt/compute/v2.1/servers/{server_id}/suspend
+#http://mycompute.pvt/compute/v2.1/servers/{server_id}/resume
+# but better use the python unified client
+
+
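
The suspend/resume calls referenced in the closing comments of this module are the natural building blocks for a VM-level challenge. A sketch of how they could be combined, assuming a recent openstacksdk (find_server, suspend_server, resume_server and wait_for_server are standard compute-proxy calls; the server name is an example, not project data):

    def suspend_and_resume_server(conn, server_name_or_id):
        # simulate a VM failure by suspending the server, then restore it by resuming it
        server = conn.compute.find_server(server_name_or_id)
        if server is None:
            print("Server not found:", server_name_or_id)
            return
        conn.compute.suspend_server(server)                         # challenge start
        conn.compute.wait_for_server(server, status='SUSPENDED')
        conn.compute.resume_server(server)                          # challenge stop
        conn.compute.wait_for_server(server, status='ACTIVE')

With the Connection built in gdtest_openstack(), this would be invoked as suspend_and_resume_server(conn, 'my-test-vm').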
diff --git a/lib/auto/testcase/resiliency/AutoResilMain.py b/lib/auto/testcase/resiliency/AutoResilMain.py
index 03e7151..2f67bdf 100644
--- a/lib/auto/testcase/resiliency/AutoResilMain.py
+++ b/lib/auto/testcase/resiliency/AutoResilMain.py
@@ -164,8 +164,11 @@ def main():
print("Problem with test definition: empty")
sys.exit() # stop entire program, because test definition MUST be correct
else:
- # TODO run test: method of TestDefinition, or function ?
- pass
+ # TODO run test: call selected test definition run_test_code() method
+ test_def = get_indexed_item_from_list(selected_test_def_ID, AutoResilGlobal.test_definition_list)
+ if test_def != None:
+ test_def.run_test_code()
+
else:
print("No current selection of Test Definition. Try again.")
continue
diff --git a/lib/auto/testcase/resiliency/AutoResilMgTestDef.py b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
index 757d1e5..9667f93 100644
--- a/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
+++ b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
@@ -54,6 +54,7 @@ import sys
from enum import Enum
from datetime import datetime, timedelta
import AutoResilGlobal
+#import openstack
# Constants with definition file names
FILE_PHYSICAL_RESOURCES = "ResourcesPhysical.bin"
@@ -152,8 +153,6 @@ def get_indexed_item_from_file(index, file_name):
-
-
######################################################################
class TestCase(AutoBaseObject):
@@ -269,7 +268,8 @@ class TestDefinition(AutoBaseObject):
test_def_associatedMetricsIDs,
test_def_recipientIDs,
test_def_testCLICommandSent,
- test_def_testAPICommandSent):
+ test_def_testAPICommandSent,
+ test_def_codeID):
# superclass constructor
AutoBaseObject.__init__(self, test_def_ID, test_def_name)
@@ -291,6 +291,93 @@ class TestDefinition(AutoBaseObject):
# associated test API commands to Recipients (list of data objects)
self.test_API_command_sent_list = test_def_testAPICommandSent
+ # constant for total number of test codes (one of them is used per TestDefinition instance); would be 1 per test case
+ self.TOTAL_NUMBER_OF_TEST_CODES = 10
+ # chosen test code ID (the ID is an index in a list of method names) for this instance; convention: [1;N]; in list, index is [0;N-1]
+ # a test code could use for instance Python clients (for OpenStack, Kubernetes, etc.), or HTTP APIs, or some of the CLI/API commands
+ try:
+ if 1 <= test_def_codeID <= self.TOTAL_NUMBER_OF_TEST_CODES:
+ self.test_code_ID = test_def_codeID
+ else:
+ print("TestDefinition constructor: incorrect test_def_codeID=",test_def_codeID)
+ sys.exit() # stop entire program, because code ID MUST be correct
+ except Exception as e:
+ print(type(e), e)
+ sys.exit() # stop entire program, because code ID MUST be correct
+
+ self.test_code_list = [] # list of method names; leave as per-object method (i.e. not as class methods or as static methods)
+ # add one by one, for easier later additions of new methods
+ self.test_code_list.append(self.test_code001)
+ self.test_code_list.append(self.test_code002)
+ self.test_code_list.append(self.test_code003)
+ self.test_code_list.append(self.test_code004)
+ self.test_code_list.append(self.test_code005)
+ self.test_code_list.append(self.test_code006)
+ self.test_code_list.append(self.test_code007)
+ self.test_code_list.append(self.test_code008)
+ self.test_code_list.append(self.test_code009)
+ self.test_code_list.append(self.test_code010)
+
+
+ def run_test_code(self):
+ """Run currently selected test code."""
+ try:
+ test_code_index = self.test_code_ID - 1 # lists are indexed from 0 to N-1
+ self.test_code_list[test_code_index]() # invoke corresponding method, via index
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+
+ # library of test codes, probably 1 per test case, so test_case_ID would be the same as test_code_ID
+ def test_code001(self):
+ """Test case code number 001."""
+ print("This is test_code001 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+ def test_code002(self):
+ """Test case code number 002."""
+ print("This is test_code002 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+ def test_code003(self):
+ """Test case code number 003."""
+ print("This is test_code003 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+ def test_code004(self):
+ """Test case code number 004."""
+ print("This is test_code004 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+ def test_code005(self):
+ """Test case code number 005."""
+ print("This is test_code005 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+ # here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,
+ # monitoring of VNF, trigger stop code from challenge def, perform restoration of VNF
+ challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)
+ if challenge_def != None:
+ challenge_def.run_start_challenge_code()
+ challenge_def.run_stop_challenge_code()
+
+
+ def test_code006(self):
+ """Test case code number 006."""
+ print("This is test_code006 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+ def test_code007(self):
+ """Test case code number 007."""
+ print("This is test_code007 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+ def test_code008(self):
+ """Test case code number 008."""
+ print("This is test_code008 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+ def test_code009(self):
+ """Test case code number 009."""
+ print("This is test_code009 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
+ def test_code010(self):
+ """Test case code number 010."""
+ print("This is test_code010 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
+
def printout_all(self, indent_level):
"""Print out all attributes, with an indentation level."""
@@ -304,6 +391,8 @@ class TestDefinition(AutoBaseObject):
if test_case != None:
test_case.printout_all(indent_level+1)
+ print(indent, "|-test code ID:", self.test_code_ID, sep='')
+
print(indent, "|-associated challenge def ID:", self.challenge_def_ID, sep='')
challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)
if challenge_def != None:
@@ -357,6 +446,7 @@ def init_test_definitions():
test_def_recipientIDs = [2]
test_def_testCLICommandSent = ["pwd","kubectl describe pods --include-uninitialized=false"]
test_def_testAPICommandSent = ["data1","data2"]
+ test_def_testCodeID = 5
test_definitions.append(TestDefinition(test_def_ID, test_def_name,
test_def_challengeDefID,
test_def_testCaseID,
@@ -364,7 +454,8 @@ def init_test_definitions():
test_def_associatedMetricsIDs,
test_def_recipientIDs,
test_def_testCLICommandSent,
- test_def_testAPICommandSent))
+ test_def_testAPICommandSent,
+ test_def_testCodeID))
# write list to binary file
write_list_bin(test_definitions, FILE_TEST_DEFINITIONS)
@@ -400,7 +491,8 @@ class ChallengeDefinition(AutoBaseObject):
chall_def_startChallengeCLICommandSent,
chall_def_stopChallengeCLICommandSent,
chall_def_startChallengeAPICommandSent,
- chall_def_stopChallengeAPICommandSent):
+ chall_def_stopChallengeAPICommandSent,
+ chall_def_codeID):
# superclass constructor
AutoBaseObject.__init__(self, chall_def_ID, chall_def_name)
@@ -431,6 +523,142 @@ class ChallengeDefinition(AutoBaseObject):
# if API; to restore to normal
self.stop_challenge_API_command_sent = chall_def_stopChallengeAPICommandSent
+ # constant for total number of challenge codes (one of them is used per ChallengeDefinition instance);
+ # may be 1 per test case, maybe not (common challenges, could be re-used across test definitions and test cases)
+ # start and stop challenges are strictly linked: exactly 1 Stop challenge for each Start challenge, so same ID for Start and for Stop
+ self.TOTAL_NUMBER_OF_CHALLENGE_CODES = 10
+
+ # chosen start/stop challenge code ID (the ID is an index in a list of method names) for this instance;
+ # convention: [1;N]; in list, index is [0;N-1]
+ # a challenge code could use for instance Python clients (for OpenStack, Kubernetes, etc.), or HTTP APIs, or some of the CLI/API commands
+ try:
+ if 1 <= chall_def_codeID <= self.TOTAL_NUMBER_OF_CHALLENGE_CODES:
+ self.challenge_code_ID = chall_def_codeID
+ else:
+ print("ChallengeDefinition constructor: incorrect chall_def_codeID=",chall_def_codeID)
+ sys.exit() # stop entire program, because code ID MUST be correct
+ except Exception as e:
+ print(type(e), e)
+ sys.exit() # stop entire program, because code ID MUST be correct
+
+ # list of method names; leave as per-object method (i.e. not as class methods or as static methods)
+ self.start_challenge_code_list = []
+ self.stop_challenge_code_list = []
+ # add one by one, for easier later additions of new methods; MUST be same index for Start and for Stop
+ self.start_challenge_code_list.append(self.start_challenge_code001)
+ self.stop_challenge_code_list.append(self.stop_challenge_code001)
+ self.start_challenge_code_list.append(self.start_challenge_code002)
+ self.stop_challenge_code_list.append(self.stop_challenge_code002)
+ self.start_challenge_code_list.append(self.start_challenge_code003)
+ self.stop_challenge_code_list.append(self.stop_challenge_code003)
+ self.start_challenge_code_list.append(self.start_challenge_code004)
+ self.stop_challenge_code_list.append(self.stop_challenge_code004)
+ self.start_challenge_code_list.append(self.start_challenge_code005)
+ self.stop_challenge_code_list.append(self.stop_challenge_code005)
+ self.start_challenge_code_list.append(self.start_challenge_code006)
+ self.stop_challenge_code_list.append(self.stop_challenge_code006)
+ self.start_challenge_code_list.append(self.start_challenge_code007)
+ self.stop_challenge_code_list.append(self.stop_challenge_code007)
+ self.start_challenge_code_list.append(self.start_challenge_code008)
+ self.stop_challenge_code_list.append(self.stop_challenge_code008)
+ self.start_challenge_code_list.append(self.start_challenge_code009)
+ self.stop_challenge_code_list.append(self.stop_challenge_code009)
+ self.start_challenge_code_list.append(self.start_challenge_code010)
+ self.stop_challenge_code_list.append(self.stop_challenge_code010)
+
+
+ def run_start_challenge_code(self):
+ """Run currently selected challenge code, start portion."""
+ try:
+ code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
+ self.start_challenge_code_list[code_index]() # invoke corresponding start method, via index
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+ def run_stop_challenge_code(self):
+ """Run currently selected challenge code, stop portion."""
+ try:
+ code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
+ self.stop_challenge_code_list[code_index]() # invoke corresponding stop method, via index
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+
+
+ # library of challenge codes
+ def start_challenge_code001(self):
+ """Start Challenge code number 001."""
+ print("This is start_challenge_code001 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code001(self):
+ """Stop Challenge code number 001."""
+ print("This is stop_challenge_code001 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code002(self):
+ """Start Challenge code number 002."""
+ print("This is start_challenge_code002 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code002(self):
+ """Stop Challenge code number 002."""
+ print("This is stop_challenge_code002 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code003(self):
+ """Start Challenge code number 003."""
+ print("This is start_challenge_code003 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code003(self):
+ """Stop Challenge code number 003."""
+ print("This is stop_challenge_code003 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code004(self):
+ """Start Challenge code number 004."""
+ print("This is start_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code004(self):
+ """Stop Challenge code number 004."""
+ print("This is stop_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code005(self):
+ """Start Challenge code number 005."""
+ print("This is start_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code005(self):
+ """Stop Challenge code number 005."""
+ print("This is stop_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code006(self):
+ """Start Challenge code number 006."""
+ print("This is start_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code006(self):
+ """Stop Challenge code number 006."""
+ print("This is stop_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code007(self):
+ """Start Challenge code number 007."""
+ print("This is start_challenge_code007 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code007(self):
+ """Stop Challenge code number 007."""
+ print("This is stop_challenge_code007 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code008(self):
+ """Start Challenge code number 008."""
+ print("This is start_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code008(self):
+ """Stop Challenge code number 008."""
+ print("This is stop_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code009(self):
+ """Start Challenge code number 009."""
+ print("This is start_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code009(self):
+ """Stop Challenge code number 009."""
+ print("This is stop_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code010(self):
+ """Start Challenge code number 010."""
+ print("This is start_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code010(self):
+ """Stop Challenge code number 010."""
+ print("This is stop_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
+
+
def printout_all(self, indent_level):
"""Print out all attributes, with an indentation level."""
@@ -441,6 +669,8 @@ class ChallengeDefinition(AutoBaseObject):
print(indent, "|-challenge type:", self.challenge_type, sep='')
+ print(indent, "|-challenge code ID:", self.challenge_code_ID, sep='')
+
print(indent, "|-associated recipient ID:", self.recipient_ID, sep='')
recipient = get_indexed_item_from_list(self.recipient_ID, AutoResilGlobal.recipient_list)
if recipient != None:
@@ -498,6 +728,8 @@ def init_challenge_definitions():
chall_def_startChallengeAPICommandSent = []
chall_def_stopChallengeAPICommandSent = []
+ chall_def_codeID = 5
+
challenge_defs.append(ChallengeDefinition(chall_def_ID, chall_def_name,
chall_def_challengeType,
chall_def_recipientID,
@@ -508,7 +740,8 @@ def init_challenge_definitions():
chall_def_startChallengeCLICommandSent,
chall_def_stopChallengeCLICommandSent,
chall_def_startChallengeAPICommandSent,
- chall_def_stopChallengeAPICommandSent))
+ chall_def_stopChallengeAPICommandSent,
+ chall_def_codeID))
# write list to binary file
write_list_bin(challenge_defs, FILE_CHALLENGE_DEFINITIONS)
@@ -687,7 +920,7 @@ class RecoveryTimeDef(MetricDefinition):
if time_challenge_started > time_restoration_detected:
print("time_challenge_started should be <= time_restoration_detected")
print("time_challenge_started=",time_challenge_started," time_restoration_detected=",time_restoration_detected)
- sys.exit() # stop entire program, because fomulas MUST be correct
+ sys.exit() # stop entire program, because formulas MUST be correct
measured_metric_value = time_restoration_detected - time_challenge_started #difference between 2 datetime is a timedelta
timestamp = datetime.now()
@@ -712,27 +945,27 @@ class UptimePercentageDef(MetricDefinition):
if measured_uptime < 0.0:
print("measured_uptime should be >= 0.0")
print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
- sys.exit() # stop entire program, because fomulas MUST be correct
+ sys.exit() # stop entire program, because formulas MUST be correct
if reference_time <= 0.0:
print("reference_time should be > 0.0")
print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
- sys.exit() # stop entire program, because fomulas MUST be correct
+ sys.exit() # stop entire program, because formulas MUST be correct
if planned_downtime < 0.0:
print("planned_downtime should be >= 0.0")
print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
- sys.exit() # stop entire program, because fomulas MUST be correct
+ sys.exit() # stop entire program, because formulas MUST be correct
if reference_time < planned_downtime:
print("reference_time should be >= planned_downtime")
print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
- sys.exit() # stop entire program, because fomulas MUST be correct
+ sys.exit() # stop entire program, because formulas MUST be correct
if measured_uptime > reference_time:
print("measured_uptime should be <= reference_time")
print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
- sys.exit() # stop entire program, because fomulas MUST be correct
+ sys.exit() # stop entire program, because formulas MUST be correct
if measured_uptime > (reference_time - planned_downtime):
print("measured_uptime should be <= (reference_time - planned_downtime)")
print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
- sys.exit() # stop entire program, because fomulas MUST be correct
+ sys.exit() # stop entire program, because formulas MUST be correct
measured_metric_value = 100 * measured_uptime / (reference_time - planned_downtime)
timestamp = datetime.now()
@@ -1340,10 +1573,21 @@ def main():
print()
+ challgs = init_challenge_definitions()
+ print(challgs)
+ chall = get_indexed_item_from_file(1,FILE_CHALLENGE_DEFINITIONS)
+ print(chall)
+ chall.run_start_challenge_code()
+ chall.run_stop_challenge_code()
+
+ print()
+
tds = init_test_definitions()
print(tds)
td = get_indexed_item_from_file(1,FILE_TEST_DEFINITIONS)
print(td)
+ #td.printout_all(0)
+ #td.run_test_code()
print()
@@ -1354,12 +1598,6 @@ def main():
print()
- challgs = init_challenge_definitions()
- print(challgs)
- chall = get_indexed_item_from_file(1,FILE_CHALLENGE_DEFINITIONS)
- print(chall)
-
- print()
metricdefs = init_metric_definitions()
print(metricdefs)
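
test_code005 above only triggers the challenge start/stop code; the recovery-time measurement it mentions could be wired in as sketched below. The VNF health-check callback is an assumption (not project code), and the timing arithmetic mirrors RecoveryTimeDef, which works on datetime differences:

    import time
    from datetime import datetime

    def measure_recovery_time(challenge_def, vnf_is_healthy, poll_interval=5, timeout=600):
        # run the challenge, then poll the VNF until it is restored, and return the elapsed time
        challenge_def.run_start_challenge_code()
        time_challenge_started = datetime.now()
        waited = 0
        while not vnf_is_healthy() and waited < timeout:   # vnf_is_healthy: assumed callback
            time.sleep(poll_interval)
            waited += poll_interval
        time_restoration_detected = datetime.now()
        challenge_def.run_stop_challenge_code()
        return time_restoration_detected - time_challenge_started  # timedelta, as in RecoveryTimeDef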
diff --git a/lib/auto/testcase/resiliency/clouds.yaml b/lib/auto/testcase/resiliency/clouds.yaml
new file mode 100644
index 0000000..593a07c
--- /dev/null
+++ b/lib/auto/testcase/resiliency/clouds.yaml
@@ -0,0 +1,91 @@
+clouds:
+
+ # Openstack instance on Arm pod, controller IP@ 172.16.10.10
+ # Horizon: https://10.10.50.103/project/
+ # Identity API according to Horizon dashboard: https://10.10.50.103:5000/v2.0
+ # other potential auth_url: http://172.16.10.10:35357/v3
+ # (OS_AUTH_URL=http://controller:35357/v3)
+ # 2 project names: admin, service (project = tenant)
+ # project ID: 122caf64b3df4818bf2ce5ba793226b2
+ # EC2 URL: https://10.10.50.103:8773/services/Cloud
+ # EC2 access key: bcf3c69a7d1c405e9757f87f26faf19f
+ # 10.10.50.0/8: floating IP@
+ # 10.10.10.0/8: fixed IP@
+ armopenstack:
+ auth:
+ auth_url: https://10.10.50.103:5000/v2.0
+ username: admin
+ password: opnfv_secret
+ project_name: admin
+ region_name: RegionOne
+
+ # Openstack instance on LaaS hpe16, from OPNFV Euphrates, controller IP@ (mgt: 172.16.10.101; public: 10.16.0.101)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.101:35357/v2.0
+ # internal: http://172.16.10.101:5000/v2.0
+ # public: http://10.16.0.101:5000/v2.0 : works on LaaS hpe16, from hpe16
+ hpe16openstackEuphrates:
+ auth:
+ auth_url: http://10.16.0.101:5000/v2.0
+ username: admin
+ password: opnfv_secret
+ project_name: admin
+ region_name: RegionOne
+
+ # Openstack instance on LaaS hpe16, from OPNFV Fraser, controller IP@ (mgt: 172.16.10.36; public: 10.16.0.107)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.36:35357/v3
+ # internal: http://172.16.10.36:5000/v3
+ # public: http://10.16.0.107:5000/v3
+ hpe16openstackFraser:
+ auth:
+ auth_url: http://10.16.0.107:5000/v3
+ username: admin
+ password: opnfv_secret
+ project_name: admin
+ region_name: RegionOne
+
+# ubuntu@ctl01:~$ openstack project show admin
+# +-------------+----------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------+
+# | description | OpenStack Admin tenant |
+# | domain_id | default |
+# | enabled | True |
+# | id | 04fcfe7aa83f4df79ae39ca748aa8637 |
+# | is_domain | False |
+# | name | admin |
+# | parent_id | default |
+# +-------------+----------------------------------+
+
+# (openstack) domain show default
+# +-------------+----------------------------------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------------------------------+
+# | description | Domain created automatically to support V2.0 operations. |
+# | enabled | True |
+# | id | default |
+# | name | Default |
+# +-------------+----------------------------------------------------------+
+
+# (openstack) domain show heat_user_domain
+# +-------------+---------------------------------------------+
+# | Field | Value |
+# +-------------+---------------------------------------------+
+# | description | Contains users and projects created by heat |
+# | enabled | True |
+# | id | d9c29adac0fe4816922d783b257879d6 |
+# | name | heat_user_domain |
+# +-------------+---------------------------------------------+
+
+# Example environment variables for the hpe16 Fraser instance (kept as comments so this
+# file remains valid YAML for the openstacksdk config loader):
+# export OS_AUTH_URL=http://10.16.0.107:5000/v3
+# export OS_PROJECT_ID=04fcfe7aa83f4df79ae39ca748aa8637
+# export OS_PROJECT_NAME="admin"
+# export OS_USER_DOMAIN_NAME="Default"
+# export OS_USERNAME="admin"
+# export OS_PASSWORD="opnfv_secret"
+# export OS_REGION_NAME="RegionOne"
+# export OS_INTERFACE=public
+# export OS_IDENTITY_API_VERSION=3
+
+
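
With a sufficiently recent openstacksdk, the named entries in this clouds.yaml can be consumed directly (the AttributeError noted in AutoResilItfCloud.py suggests the SDK on the test hosts was older, hence the explicit Connection built there). A minimal sketch:

    import openstack

    # clouds.yaml is picked up from the current directory, ~/.config/openstack/ or /etc/openstack/
    conn = openstack.connect(cloud='hpe16openstackFraser', region_name='RegionOne')
    for server in conn.compute.servers():
        print(server.name, server.status)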