author     Harry Huang <huangxiangyu5@huawei.com>   2018-03-13 10:52:01 +0800
committer  Harry Huang <huangxiangyu5@huawei.com>   2018-03-13 10:52:01 +0800
commit     206c5a988ab003af7c04b82044eb565d8b3bcfb7 (patch)
tree       ea8acb86a0d681f95b768578430fba08a19226f5 /lib
parent     ee1c7bae9fd29ef2ac2ca04bbccb73230723c3ce (diff)
Modify repo structure
JIRA: -

1. keep modules in lib directory instead of auto

Change-Id: Ie4c51b28554575bafbaa89c5f57309a786b903e0
Signed-off-by: Harry Huang <huangxiangyu5@huawei.com>
Diffstat (limited to 'lib')
-rw-r--r--  lib/auto.egg-info/PKG-INFO                                  10
-rw-r--r--  lib/auto.egg-info/SOURCES.txt                               11
-rw-r--r--  lib/auto.egg-info/dependency_links.txt                       1
-rw-r--r--  lib/auto.egg-info/requires.txt                               2
-rw-r--r--  lib/auto.egg-info/top_level.txt                              1
-rw-r--r--  lib/auto/__init__.py                                         0
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilIftCloud.py           39
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilItfOS.py              38
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py        39
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilMain.py              134
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilMgTestDef.py        1182
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilRunTest.py            56
-rw-r--r--  lib/auto/testcase/vnf/vbng/MANIFEST.json                    17
-rw-r--r--  lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env               35
-rw-r--r--  lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml             288
-rw-r--r--  lib/auto/testcase/vnf/vbrgemu/MANIFEST.json                 17
-rw-r--r--  lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env         28
-rw-r--r--  lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml       253
-rw-r--r--  lib/auto/testcase/vnf/vgmux/MANIFEST.json                   17
-rw-r--r--  lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env             35
-rw-r--r--  lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml           281
-rw-r--r--  lib/auto/testcase/vnf/vgw/MANIFEST.json                     17
-rw-r--r--  lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env                 32
-rw-r--r--  lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml               261
-rw-r--r--  lib/auto/util/__init__.py                                    0
-rw-r--r--  lib/auto/util/openstack_lib.py                             332
-rw-r--r--  lib/auto/util/util.py                                       86
-rw-r--r--  lib/auto/util/yaml_type.py                                  12
28 files changed, 3224 insertions, 0 deletions
diff --git a/lib/auto.egg-info/PKG-INFO b/lib/auto.egg-info/PKG-INFO
new file mode 100644
index 0000000..d0a669b
--- /dev/null
+++ b/lib/auto.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: auto
+Version: 1.0.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/lib/auto.egg-info/SOURCES.txt b/lib/auto.egg-info/SOURCES.txt
new file mode 100644
index 0000000..ede3967
--- /dev/null
+++ b/lib/auto.egg-info/SOURCES.txt
@@ -0,0 +1,11 @@
+setup.py
+lib/auto/__init__.py
+lib/auto.egg-info/PKG-INFO
+lib/auto.egg-info/SOURCES.txt
+lib/auto.egg-info/dependency_links.txt
+lib/auto.egg-info/requires.txt
+lib/auto.egg-info/top_level.txt
+lib/auto/util/__init__.py
+lib/auto/util/openstack_lib.py
+lib/auto/util/util.py
+lib/auto/util/yaml_type.py
\ No newline at end of file
diff --git a/lib/auto.egg-info/dependency_links.txt b/lib/auto.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/auto.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/lib/auto.egg-info/requires.txt b/lib/auto.egg-info/requires.txt
new file mode 100644
index 0000000..472fa91
--- /dev/null
+++ b/lib/auto.egg-info/requires.txt
@@ -0,0 +1,2 @@
+GitPython
+pycrypto
diff --git a/lib/auto.egg-info/top_level.txt b/lib/auto.egg-info/top_level.txt
new file mode 100644
index 0000000..865faf1
--- /dev/null
+++ b/lib/auto.egg-info/top_level.txt
@@ -0,0 +1 @@
+auto
diff --git a/lib/auto/__init__.py b/lib/auto/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/auto/__init__.py
diff --git a/lib/auto/testcase/resiliency/AutoResilIftCloud.py b/lib/auto/testcase/resiliency/AutoResilIftCloud.py
new file mode 100644
index 0000000..bca8b95
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilIftCloud.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with cloud managers (OpenStack, Kubernetes, AWS, ...)
+
+
+
+def f1():
+ return 0
+
+
+
+
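Note: AutoResilIftCloud.py is only a stub at this point (a placeholder f1()). As a rough illustration of the kind of cloud-manager interface this module is meant to hold, the sketch below queries the status of an OpenStack VM; it assumes the openstacksdk package and OS_* credentials in the environment, and it is not part of this commit.

# Illustrative sketch only -- not part of this commit.
# Assumes the 'openstack' SDK package is installed and OS_* env vars are set.
import openstack

def get_server_status(server_name):
    """Return the status string of a Nova server, or None if not found."""
    conn = openstack.connect()               # reads credentials from the environment
    server = conn.compute.find_server(server_name)
    if server is None:
        return None
    return server.status                     # e.g. "ACTIVE", "SHUTOFF", "ERROR"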
diff --git a/lib/auto/testcase/resiliency/AutoResilItfOS.py b/lib/auto/testcase/resiliency/AutoResilItfOS.py
new file mode 100644
index 0000000..2fc6c85
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilItfOS.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with the OS or with servers
+
+
+def f1():
+ return 0
+
+
+
+
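Note: AutoResilItfOS.py is likewise a stub. A minimal sketch of an OS/server interface function follows, assuming the paramiko SSH library and credentials such as those stored in a Recipient object; illustrative only, not part of this commit.

# Illustrative sketch only -- not part of this commit.
# Assumes the 'paramiko' package; host/credentials would come from a Recipient object.
import paramiko

def run_remote_command(host, username, password, command):
    """Run a shell command on a remote server and return its stdout as a string."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, username=username, password=password)
    try:
        _stdin, stdout, _stderr = client.exec_command(command)
        return stdout.read().decode()
    finally:
        client.close()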
diff --git a/lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py b/lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py
new file mode 100644
index 0000000..b6b1745
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with VNF/NFV managers (focus on ONAP)
+# entities that manage VNFs and orchestrate services (VNF-M and NFV-O)
+
+
+def f1():
+ return 0
+
+
+
+
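Note: AutoResilItfVNFMNFVO.py is also a stub. A hedged sketch of how a query to an ONAP-style REST endpoint might look is given below, assuming the requests package; the URL path and function name are hypothetical and do not reflect an actual ONAP API.

# Illustrative sketch only -- not part of this commit.
# Assumes the 'requests' package; the URL path below is hypothetical and would
# depend on the actual ONAP component (e.g. SO/MSO, A&AI) and its API version.
import requests

def get_service_instance(base_url, instance_id, user, password):
    """Query an ONAP-style REST endpoint and return the JSON payload (raises on HTTP error)."""
    url = base_url.rstrip("/") + "/serviceInstances/" + instance_id   # hypothetical path
    response = requests.get(url, auth=(user, password), verify=False, timeout=30)
    response.raise_for_status()
    return response.json()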
diff --git a/lib/auto/testcase/resiliency/AutoResilMain.py b/lib/auto/testcase/resiliency/AutoResilMain.py
new file mode 100644
index 0000000..a69e777
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilMain.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: main program
+# data initialization
+# interactive CLI user menu:
+# 1) select a test definition to run
+# 2) view definition of selected test (pull all data from definition files)
+# 3) start test
+# 4) exit
+
+
+#docstring
+""" This is the main module for OPNFV Auto Test Data for Use Case 2: Resilience Improvements Through ONAP.
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+
+
+
+######################################################################
+# import statements
+from AutoResilMgTestDef import *
+
+# Constants
+PROJECT_NAME = "Auto"
+USE_CASE_NAME = "Resilience Improvements Through ONAP"
+
+
+######################################################################
+
+def show_menu(selected_test_def_ID):
+ print("Project ", PROJECT_NAME, ", Use Case: ",USE_CASE_NAME)
+ if selected_test_def_ID>0 :
+ print("Current test Definition ID: ",selected_test_def_ID)
+ else:
+ print("Current test Definition ID: (none)")
+ print("1: select Test Definition ID")
+ print("2: view current Test Definition details")
+ print("3: start an execution of current Test Definition")
+ print("4: exit")
+
+
+def get_menu_choice():
+
+ while True:
+ try:
+ user_choice = int(input(" Choice: "))
+ except ValueError:
+ print("Invalid choice (must be an integer). Try again.")
+ continue
+ if user_choice < 1 or user_choice > 4:
+ print("Invalid choice (must be between 1 and 4). Try again.")
+ continue
+ else:
+ return user_choice
+
+
+def get_test_def_ID():
+
+ while True:
+ try:
+ user_test_def_ID = int(input(" Test Definition ID: "))
+ except ValueError:
+ print("Invalid choice (must be an integer). Try again.")
+ continue
+
+ test_defs = read_list_bin(FILE_TEST_DEFINITIONS)
+ if (test_defs == None) or (test_defs==[]):
+ print("Problem with test definition file: empty")
+ sys.exit()
+
+ if index_already_there(user_test_def_ID, test_defs):
+ return user_test_def_ID
+ else:
+ print("Invalid choice (Test Definition ID ",user_test_def_ID," does not exist). Try again.")
+ continue
+
+
+
+######################################################################
+def main():
+
+ # TODO: run initializations to refresh data and make sure files are here
+
+ selected_test_def_ID = -1
+
+ while True:
+
+ show_menu(selected_test_def_ID)
+ user_choice = get_menu_choice()
+ #print("user_choice:",user_choice) #test
+
+ if user_choice == 1:
+ selected_test_def_ID = get_test_def_ID()
+
+ if user_choice == 4:
+ sys.exit()
+
+        print(get_indexed_item_from_file(selected_test_def_ID,FILE_TEST_DEFINITIONS))
+
+ print(get_indexed_item_from_file(5,FILE_TEST_CASES))
+
+ print("End of Main\n Project: \t\t", PROJECT_NAME, "\n Use Case:\t",USE_CASE_NAME)
+
+if __name__ == "__main__":
+ main()
+
diff --git a/lib/auto/testcase/resiliency/AutoResilMgTestDef.py b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
new file mode 100644
index 0000000..cc3d0ef
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
@@ -0,0 +1,1182 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: management of test definitions
+
+# Functions and classes to manage and initialize test data relative to:
+# physical resources
+# cloud resources
+# VNFs
+# recipients (OS, cloud/VNF managers)
+# challenge definitions
+# optional metrics
+# test definitions
+# Storage of definition data in binary files (pickle), and test data results in .CSV files
+
+
+#docstring
+"""This module contains functions and classes to manage OPNFV Auto Test Data for Use Case 2: Resilience Improvements Through ONAP.
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+
+######################################################################
+# import statements
+import pickle
+import csv
+import sys
+from enum import Enum
+from datetime import datetime, timedelta
+
+# Constants with definition file names
+FILE_PHYSICAL_RESOURCES = "ResourcesPhysical.bin"
+FILE_CLOUD_RESOURCES = "ResourcesCloud.bin"
+FILE_VNFS_SERVICES = "ResourcesVNFServices.bin"
+FILE_RECIPIENTS = "Recipients.bin"
+FILE_TEST_CASES = "TestCases.bin"
+FILE_METRIC_DEFINITIONS = "DefinitionsMetrics.bin"
+FILE_CHALLENGE_DEFINITIONS = "DefinitionsChallenges.bin"
+FILE_TEST_DEFINITIONS = "DefinitionsTests.bin"
+
+
+######################################################################
+
+def read_list_bin(file_name):
+ """Generic function to extract a list from a binary file."""
+ try:
+ extracted_list = []
+ with open(file_name, "rb") as binary_file:
+ extracted_list = pickle.load(binary_file)
+ return extracted_list
+ except FileNotFoundError:
+ print("File not found: ",file_name)
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+
+def write_list_bin(inserted_list, file_name):
+ """Generic function to write a list to a binary file (replace content)."""
+ try:
+ with open(file_name, "wb") as binary_file:
+ pickle.dump(inserted_list, binary_file)
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+
+class AutoBaseObject:
+ """Base class for Auto project, with common attributes (ID, name)."""
+ def __init__ (self, param_ID, param_name):
+ self.ID = param_ID
+ self.name = param_name
+ # for display
+ def __repr__(self):
+ return ("ID="+str(self.ID)+" name="+self.name)
+ # for print
+ def __str__(self):
+ return ("ID="+str(self.ID)+" name="+self.name)
+
+
+def index_already_there(index, given_list):
+ """Generic function to check if an index already exists in a list of AutoBaseObject."""
+
+ # check if ID already exists
+ already_there = False
+ if len(given_list)>0:
+ for item in given_list:
+ if isinstance(item, AutoBaseObject):
+ if item.ID == index:
+ already_there = True
+ break
+ else:
+ print("Issue with list: item is not AutoBaseObject")
+ print(" index=\n",index)
+ sys.exit()
+ return already_there
+
+
+def get_indexed_item_from_list(index, given_list):
+ """Generic function to get an indexed entry from a list of AutoBaseObject."""
+
+ returned_item = None
+
+ if len(given_list)>0:
+ for item in given_list:
+ if isinstance(item, AutoBaseObject):
+ if item.ID == index:
+ returned_item = item
+ break
+ else:
+ print("Issue with list: item is not AutoBaseObject")
+ print(" index=\n",index)
+ sys.exit()
+ return returned_item
+
+
+def get_indexed_item_from_file(index, file_name):
+ """Generic function to get an indexed entry from a list of AutoBaseObject stored in a binary file."""
+
+ list_in_file = read_list_bin(file_name)
+ return get_indexed_item_from_list(index, list_in_file)
+
+
+
+
+
+######################################################################
+
+class TestCase(AutoBaseObject):
+ """Test Case class for Auto project."""
+ def __init__ (self, test_case_ID, test_case_name,
+ test_case_JIRA_URL):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, test_case_ID, test_case_name)
+
+ # specifics for this subclass
+
+ # Auto JIRA link
+ self.JIRA_URL = test_case_JIRA_URL
+
+
+# no need for functions to remove data: ever-growing library, arbitrary ID
+# initial version: should not even add data dynamically, in case object signature changes
+# better stick to initialization functions only to fill data, unless 100% sure signature does not change
+def add_test_case_to_file(test_case_ID, test_case_name, test_case_JIRA_URL):
+ """Function to add persistent data about test cases (in binary file)."""
+
+ test_cases = read_list_bin(FILE_TEST_CASES)
+
+ if index_already_there(test_case_ID, test_cases):
+ print("Test Case ID=",test_case_ID," is already defined and can't be added")
+ else:
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+ write_list_bin(test_cases, FILE_TEST_CASES)
+
+ return test_cases
+
+
+
+def init_test_cases():
+ """Function to initialize test case data."""
+ test_cases = []
+
+ # add info to list in memory, one by one, following signature values
+ test_case_ID = 1
+ test_case_name = "auto-resiliency-pif-001"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-9"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 2
+ test_case_name = "auto-resiliency-pif-002"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-10"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 3
+ test_case_name = "auto-resiliency-pif-003"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-11"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 4
+ test_case_name = "auto-resiliency-pif-004"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-12"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 5
+ test_case_name = "auto-resiliency-vif-001"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-13"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 6
+ test_case_name = "auto-resiliency-vif-002"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-14"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 7
+ test_case_name = "auto-resiliency-vif-003"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-15"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 8
+ test_case_name = "auto-resiliency-sec-001"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-16"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 9
+ test_case_name = "auto-resiliency-sec-002"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-17"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ test_case_ID = 10
+ test_case_name = "auto-resiliency-sec-003"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-18"
+ test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
+
+ # write list to binary file
+ write_list_bin(test_cases, FILE_TEST_CASES)
+
+ return test_cases
+
+
+######################################################################
+
+class TestDefinition(AutoBaseObject):
+ """Test Definition class for Auto project."""
+ def __init__ (self, test_def_ID, test_def_name,
+ test_def_challengeDefID,
+ test_def_testCaseID,
+ test_def_VNFIDs,
+ test_def_associatedMetricsIDs,
+ test_def_recipientIDs,
+ test_def_testCLICommandSent,
+ test_def_testAPICommandSent):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, test_def_ID, test_def_name)
+
+ # specifics for this subclass
+
+ # associated Challenge Definition (ID)
+ self.challenge_def_ID = test_def_challengeDefID
+ # associated Test Case (ID)
+ self.test_case_ID = test_def_testCaseID
+ # associated VNFs (list of IDs)
+ self.VNF_ID_list = test_def_VNFIDs
+ # associated Metrics (list of IDs)
+ self.associated_metrics_ID_list = test_def_associatedMetricsIDs
+ # associated Recipients (list of IDs)
+ self.recipient_ID_list = test_def_recipientIDs
+ # associated test CLI commands to Recipients (list of strings)
+ self.test_CLI_command_sent_list = test_def_testCLICommandSent
+ # associated test API commands to Recipients (list of data objects)
+ self.test_API_command_sent_list = test_def_testAPICommandSent
+
+
+def init_test_definitions():
+ """Function to initialize test definition data."""
+ test_definitions = []
+
+ # add info to list in memory, one by one, following signature values
+ test_def_ID = 1
+ test_def_name = "VM failure impact on virtual firewall (vFW VNF)"
+ test_def_challengeDefID = 1
+ test_def_testCaseID = 5
+ test_def_VNFIDs = [1]
+ test_def_associatedMetricsIDs = []
+ test_def_recipientIDs = [2]
+ test_def_testCLICommandSent = ["pwd"]
+ test_def_testAPICommandSent = ["data1","data2"]
+ test_definitions.append(TestDefinition(test_def_ID, test_def_name,
+ test_def_challengeDefID,
+ test_def_testCaseID,
+ test_def_VNFIDs,
+ test_def_associatedMetricsIDs,
+ test_def_recipientIDs,
+ test_def_testCLICommandSent,
+ test_def_testAPICommandSent))
+
+ # write list to binary file
+ write_list_bin(test_definitions, FILE_TEST_DEFINITIONS)
+
+ return test_definitions
+
+
+######################################################################
+
+class ChallengeType(Enum):
+ # server-level failures
+ COMPUTE_HOST_FAILURE = 100
+ DISK_FAILURE = 101
+ LINK_FAILURE = 102
+ NIC_FAILURE = 103
+ # network-level failures
+ OVS_BRIDGE_FAILURE = 200
+ # security stresses
+ HOST_TAMPERING = 300
+ HOST_INTRUSION = 301
+ NETWORK_INTRUSION = 302
+
+
+class ChallengeDefinition(AutoBaseObject):
+ """Challenge Definition class for Auto project."""
+ def __init__ (self, chall_def_ID, chall_def_name,
+ chall_def_challengeType,
+ chall_def_recipientID,
+ chall_def_impactedResourcesInfo,
+ chall_def_impactedResourceIDs,
+ chall_def_startChallengeCLICommandSent,
+ chall_def_stopChallengeCLICommandSent,
+ chall_def_startChallengeAPICommandSent,
+ chall_def_stopChallengeAPICommandSent):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, chall_def_ID, chall_def_name)
+
+ # specifics for this subclass
+
+ # info about challenge type, categorization
+ self.challenge_type = chall_def_challengeType
+ # recipient instance, to start/stop the challenge
+ self.recipient_ID = chall_def_recipientID
+ # free-form info about impacted resource(s)
+ self.impacted_resources_info = chall_def_impactedResourcesInfo
+ # impacted resources (list of IDs, usually only 1)
+ self.impacted_resource_ID_list = chall_def_impactedResourceIDs
+ # if CLI; can include hard-coded references to resources
+ self.start_challenge_CLI_command_sent = chall_def_startChallengeCLICommandSent
+ # if CLI; to restore to normal
+ self.stop_challenge_CLI_command_sent = chall_def_stopChallengeCLICommandSent
+ # if API; can include hard-coded references to resources
+ self.start_challenge_API_command_sent = chall_def_startChallengeAPICommandSent
+ # if API; to restore to normal
+ self.stop_challenge_API_command_sent = chall_def_stopChallengeAPICommandSent
+
+
+def init_challenge_definitions():
+ """Function to initialize challenge definition data."""
+ challenge_defs = []
+
+ # add info to list in memory, one by one, following signature values
+ chall_def_ID = 1
+ chall_def_name = "VM failure"
+ chall_def_challengeType = ChallengeType.COMPUTE_HOST_FAILURE
+ chall_def_recipientID = 1
+ chall_def_impactedResourcesInfo = "OpenStack VM on ctl02 in Arm pod"
+ chall_def_impactedResourceIDs = [2]
+ chall_def_startChallengeCLICommandSent = "service nova-compute stop"
+ chall_def_stopChallengeCLICommandSent = "service nova-compute restart"
+ chall_def_startChallengeAPICommandSent = []
+ chall_def_stopChallengeAPICommandSent = []
+
+ challenge_defs.append(ChallengeDefinition(chall_def_ID, chall_def_name,
+ chall_def_challengeType,
+ chall_def_recipientID,
+ chall_def_impactedResourcesInfo,
+ chall_def_impactedResourceIDs,
+ chall_def_startChallengeCLICommandSent,
+ chall_def_stopChallengeCLICommandSent,
+ chall_def_startChallengeAPICommandSent,
+ chall_def_stopChallengeAPICommandSent))
+
+ # write list to binary file
+ write_list_bin(challenge_defs, FILE_CHALLENGE_DEFINITIONS)
+
+ return challenge_defs
+
+
+######################################################################
+
+class Recipient(AutoBaseObject):
+ """Recipient class for Auto project."""
+ def __init__ (self, recipient_ID, recipient_name,
+ recipient_info,
+ recipient_versionInfo,
+ recipient_accessIPAddress,
+ recipient_accessURL,
+ recipient_userNameCreds,
+ recipient_passwordCreds,
+ recipient_keyCreds,
+ recipient_networkInfo):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, recipient_ID, recipient_name)
+
+ # specifics for this subclass
+
+ # optional: free-form text info about recipient
+ self.info = recipient_info
+ # optional: version info
+ self.version_info = recipient_versionInfo
+ # optional: IP address of recipient
+ self.access_IP_address = recipient_accessIPAddress
+ # optional: URL of recipient
+ self.access_URL = recipient_accessURL
+ # optional: username for user/pwd credentials
+ self.username_creds = recipient_userNameCreds
+ # optional: password for user/pwd credentials
+ self.password_creds = recipient_passwordCreds
+        # optional: key credentials (e.g. SSH key), as an alternative to user/pwd
+ self.key_creds = recipient_keyCreds
+ # optional: info about recipient's network (VPN, VCN, VN, Neutron, ...)
+ self.network_info = recipient_networkInfo
+
+
+def init_recipients():
+ """Function to initialize recipient data."""
+ test_recipients = []
+
+ # add info to list in memory, one by one, following signature values
+ recipient_ID = 1
+ recipient_name = "OpenStack on Arm pod"
+ recipient_info = "controller resolves to one of the CTL VMs"
+ recipient_versionInfo = ""
+ recipient_accessIPAddress = "172.16.10.10"
+ recipient_accessURL = ""
+ recipient_userNameCreds = "ali"
+ recipient_passwordCreds = "baba"
+ recipient_keyCreds = "ssh-rsa k7fjsnEFzESfg6phg"
+ recipient_networkInfo = "UNH IOL 172.16.0.0/16"
+
+ test_recipients.append(Recipient(recipient_ID, recipient_name,
+ recipient_info,
+ recipient_versionInfo,
+ recipient_accessIPAddress,
+ recipient_accessURL,
+ recipient_userNameCreds,
+ recipient_passwordCreds,
+ recipient_keyCreds,
+ recipient_networkInfo))
+
+ # write list to binary file
+ write_list_bin(test_recipients, FILE_RECIPIENTS)
+
+ return test_recipients
+
+
+######################################################################
+
+class MetricDefinition(AutoBaseObject):
+ """Metric Definition class for Auto project. Actual metrics are subclasses with specific calculation methods."""
+ def __init__ (self, metric_def_ID, metric_def_name,
+ metric_def_info):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, metric_def_ID, metric_def_name)
+
+ # specifics for this subclass
+
+ # optional: free-form text info about metric: formula, etc.
+ self.info = metric_def_info
+
+
+class MetricValue:
+ """Object for storing a measurement of a Metric Definition for Auto project, with common attributes
+ (value, timestamp, metric_def_ID).
+ """
+ def __init__ (self, param_value, param_timestamp, param_metric_def_ID):
+ self.value = param_value
+ self.timestamp = param_timestamp
+ self.metric_def_ID = param_metric_def_ID
+ # for display
+ def __repr__(self):
+ return ("metric_def_ID="+str(self.metric_def_ID)+
+ " value="+str(self.value)+
+ " timestamp="+self.timestamp.strftime("%Y-%m-%d %H:%M:%S"))
+ # for print
+ def __str__(self):
+ return ("metric_def_ID="+str(self.metric_def_ID)+
+ " value="+str(self.value)+
+ " timestamp="+self.timestamp.strftime("%Y-%m-%d %H:%M:%S"))
+
+
+class RecoveryTimeDef(MetricDefinition):
+ """Recovery Time Metric Definition class for Auto project.
+ Formula: recovery_time = time_restoration_detected - time_challenge_started
+ (measured duration between start of challenge (failure, stress, ...) and detection of restoration).
+ Enter values as datetime objects.
+ """
+ def compute (self,
+ time_challenge_started, time_restoration_detected):
+ """time_challenge_started: datetime object, time at which challenge was started;
+ time_restoration_detected: datetime object, time at which restoration was detected;
+ returns a MetricValue containing a timedelta object as value.
+ """
+
+ # a few checks first
+ if time_challenge_started > time_restoration_detected:
+ print("time_challenge_started should be <= time_restoration_detected")
+ print("time_challenge_started=",time_challenge_started," time_restoration_detected=",time_restoration_detected)
+            sys.exit() # stop entire program, because formulas MUST be correct
+
+ measured_metric_value = time_restoration_detected - time_challenge_started #difference between 2 datetime is a timedelta
+ timestamp = datetime.now()
+
+ return MetricValue(measured_metric_value, timestamp, self.ID)
+
+
+class UptimePercentageDef(MetricDefinition):
+ """Uptime Percentage Metric Definition class for Auto project.
+    Formula: 100 * measured_uptime / (reference_time - planned_downtime)
+ Enter values in same unit (e.g., all in seconds, or all in minutes, or all in hours, etc.).
+ """
+ def compute (self,
+ measured_uptime, reference_time, planned_downtime):
+ """measured_uptime: amount of time the service/system/resource was up and running;
+ reference_time: amount of time during which the measurement was made;
+        planned_downtime: amount of time during reference_time that was planned to be down;
+ returns a MetricValue object, with a value between 0 and 100.
+ """
+
+ # a few checks first
+ if measured_uptime < 0.0:
+ print("measured_uptime should be >= 0.0")
+ print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
+            sys.exit() # stop entire program, because formulas MUST be correct
+ if reference_time <= 0.0:
+ print("reference_time should be > 0.0")
+ print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
+            sys.exit() # stop entire program, because formulas MUST be correct
+ if planned_downtime < 0.0:
+ print("planned_downtime should be >= 0.0")
+ print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
+            sys.exit() # stop entire program, because formulas MUST be correct
+ if reference_time < planned_downtime:
+ print("reference_time should be >= planned_downtime")
+ print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
+            sys.exit() # stop entire program, because formulas MUST be correct
+ if measured_uptime > reference_time:
+ print("measured_uptime should be <= reference_time")
+ print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
+            sys.exit() # stop entire program, because formulas MUST be correct
+ if measured_uptime > (reference_time - planned_downtime):
+ print("measured_uptime should be <= (reference_time - planned_downtime)")
+ print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
+            sys.exit() # stop entire program, because formulas MUST be correct
+
+ measured_metric_value = 100 * measured_uptime / (reference_time - planned_downtime)
+ timestamp = datetime.now()
+
+ return MetricValue(measured_metric_value, timestamp, self.ID)
+
+
+
+def init_metric_definitions():
+ """Function to initialize metric definition data."""
+ metric_definitions = []
+
+ # add info to list in memory, one by one, following signature values
+ metric_def_ID = 1
+ metric_def_name = "Recovery Time"
+ metric_def_info = "Measures time taken by ONAP to restore a VNF"
+ metric_definitions.append(RecoveryTimeDef(metric_def_ID, metric_def_name,
+ metric_def_info))
+
+ metric_def_ID = 2
+ metric_def_name = "Uptime Percentage"
+ metric_def_info = "Measures ratio of uptime to reference time, not counting planned downtime"
+ metric_definitions.append(UptimePercentageDef(metric_def_ID, metric_def_name,
+ metric_def_info))
+
+
+ # write list to binary file
+ write_list_bin(metric_definitions, FILE_METRIC_DEFINITIONS)
+
+ return metric_definitions
+
+
+
+######################################################################
+
+class PhysicalResource(AutoBaseObject):
+ """Physical Resource class for Auto project."""
+ def __init__ (self, phys_resrc_ID, phys_resrc_name,
+ phys_resrc_info,
+ phys_resrc_IPAddress,
+ phys_resrc_MACAddress):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, phys_resrc_ID, phys_resrc_name)
+
+ # specifics for this subclass
+
+ # optional: free-form text info about physical resource
+ self.info = phys_resrc_info
+ # optional: main IP address of physical resource (e.g. management interface for a server)
+ self.IP_address = phys_resrc_IPAddress
+ # optional: main MAC address of physical resource
+ self.MAC_address = phys_resrc_MACAddress
+
+
+def init_physical_resources():
+ """Function to initialize physical resource data."""
+ test_physical_resources = []
+
+ # add info to list in memory, one by one, following signature values
+ phys_resrc_ID = 1
+ phys_resrc_name = "small-cavium-1"
+ phys_resrc_info = "Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS"
+ phys_resrc_IPAddress = "10.10.50.12"
+ phys_resrc_MACAddress = ""
+
+ test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,
+ phys_resrc_info,
+ phys_resrc_IPAddress,
+ phys_resrc_MACAddress))
+
+ # write list to binary file
+ write_list_bin(test_physical_resources, FILE_PHYSICAL_RESOURCES)
+
+ return test_physical_resources
+
+
+######################################################################
+
+class CloudVirtualResource(AutoBaseObject):
+ """Cloud Virtual Resource class for Auto project."""
+ def __init__ (self, cldvirtres_ID, cldvirtres_name,
+ cldvirtres_info,
+ cldvirtres_IPAddress,
+ cldvirtres_URL,
+ cldvirtres_related_phys_rsrcIDs):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, cldvirtres_ID, cldvirtres_name)
+
+ # specifics for this subclass
+
+ # optional: free-form text info about cloud virtual resource
+ self.info = cldvirtres_info
+ # optional: main IP address of cloud virtual resource (e.g. management interface for a virtual router)
+ self.IP_address = cldvirtres_IPAddress
+ # optional: URL address of cloud virtual resource
+ self.URL = cldvirtres_URL
+ # optional: related/associated physical resources (if known and useful or interesting, list of integer IDs)
+ self.related_phys_rsrc_ID_list = cldvirtres_related_phys_rsrcIDs
+
+
+def init_cloud_virtual_resources():
+ """Function to initialize cloud virtual resource data."""
+ test_cldvirt_resources = []
+
+ # add info to list in memory, one by one, following signature values
+ cldvirtres_ID = 1
+ cldvirtres_name = "nova-compute-1"
+ cldvirtres_info = "nova VM in Arm pod"
+ cldvirtres_IPAddress = "50.60.70.80"
+ cldvirtres_URL = "http://50.60.70.80:8080"
+ cldvirtres_related_phys_rsrcIDs = [1,3]
+
+ test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,
+ cldvirtres_info,
+ cldvirtres_IPAddress,
+ cldvirtres_URL,
+ cldvirtres_related_phys_rsrcIDs))
+
+ # write list to binary file
+ write_list_bin(test_cldvirt_resources, FILE_CLOUD_RESOURCES)
+
+ return test_cldvirt_resources
+
+
+######################################################################
+
+class VNFService(AutoBaseObject):
+ """VNF or e2e Service class for Auto project."""
+ def __init__ (self, vnf_serv_ID, vnf_serv_name,
+ vnf_serv_info,
+ vnf_serv_IPAddress,
+ vnf_serv_URL,
+ vnf_serv_related_phys_rsrcIDs,
+ vnf_serv_related_cloudvirt_rsrcIDs):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, vnf_serv_ID, vnf_serv_name)
+
+ # specifics for this subclass
+
+ # optional: free-form text info about VNF / e2e Service
+ self.info = vnf_serv_info
+ # optional: main IP address of VNF / e2e Service (e.g. management interface for a vCPE)
+ self.IP_address = vnf_serv_IPAddress
+ # optional: URL address of VNF / e2e Service
+ self.URL = vnf_serv_URL
+ # optional: related/associated physical resources (if known and useful or interesting, list of integer IDs)
+ self.related_phys_rsrc_ID_list = vnf_serv_related_phys_rsrcIDs
+ # optional: related/associated cloud virtual resources (if known and useful or interesting, list of integer IDs)
+ self.related_cloud_virt_rsrc_ID_list = vnf_serv_related_cloudvirt_rsrcIDs
+
+
+def init_VNFs_Services():
+ """Function to initialize VNFs and e2e Services data."""
+ test_VNFs_Services = []
+
+ # add info to list in memory, one by one, following signature values
+ vnf_serv_ID = 1
+ vnf_serv_name = "vCPE-1"
+ vnf_serv_info = "virtual CPE in Arm pod"
+ vnf_serv_IPAddress = "5.4.3.2"
+ vnf_serv_URL = "http://5.4.3.2:8080"
+ vnf_serv_related_phys_rsrcIDs = [2,4,6]
+ vnf_serv_related_cloudvirt_rsrcIDs = [1,2]
+
+ test_VNFs_Services.append(VNFService(vnf_serv_ID, vnf_serv_name,
+ vnf_serv_info,
+ vnf_serv_IPAddress,
+ vnf_serv_URL,
+ vnf_serv_related_phys_rsrcIDs,
+ vnf_serv_related_cloudvirt_rsrcIDs))
+
+ # write list to binary file
+ write_list_bin(test_VNFs_Services, FILE_VNFS_SERVICES)
+
+ return test_VNFs_Services
+
+
+
+######################################################################
+
+class TimeStampedStringList:
+ """This is a utility class for Auto project, for execution classes (ChallengeExecution and TestExecution).
+    It stores a list of strings and timestamps each entry.
+ """
+ def __init__ (self):
+ self.__string_list = []
+ self.__timestamp_list = []
+
+ def append_to_list(self, string_to_append):
+        """Append a string to the list and record a timestamp."""
+ if type(string_to_append)==str:
+ current_time = datetime.now()
+ self.__string_list.append(string_to_append)
+ self.__timestamp_list.append(current_time) # timestamp will have the same index as string
+ else:
+ print("appended object must be a string, string_to_append=",string_to_append)
+ sys.exit() # stop entire program, because string MUST be correct
+
+ def get_raw_list(self):
+ return self.__string_list
+
+ def get_raw_list_timestamps(self):
+ return self.__timestamp_list
+
+ def get_timestamped_strings(self):
+ """return a list of strings with timestamps as prefixes (not showing microseconds)."""
+ ret_list = []
+ i = 0
+ while i < len(self.__string_list):
+ ret_list.append(self.__timestamp_list[i].strftime("%Y-%m-%d %H:%M:%S")+" "+self.__string_list[i])
+ i += 1
+ return ret_list
+
+ def length(self):
+ return len(self.__string_list)
+
+
+######################################################################
+
+class ChallengeExecution(AutoBaseObject):
+ """Class for Auto project, tracking the execution details of a Challenge Definition,
+ with a method to dump all results to a CSV file.
+ """
+ def __init__ (self, chall_exec_ID, chall_exec_name,
+ chall_exec_challDefID):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, chall_exec_ID, chall_exec_name)
+
+ # specifics for this subclass
+
+ # associated Challenge Definition (ID)
+ self.challenge_def_ID = chall_exec_challDefID
+
+ # attributes getting values during execution
+
+ # associated Start and Stop times (when Challenge was started and stopped)
+ self.start_time = None
+ self.stop_time = None
+ # log: list of strings, to capture any interesting or significant event
+ self.log = TimeStampedStringList()
+ # list of CLI responses
+ self.CLI_responses = TimeStampedStringList()
+ # list of API responses (convert to strings)
+ self.API_responses = TimeStampedStringList()
+
+ def write_to_csv(self):
+ """Generic function to dump all Challenge Execution data in a CSV file."""
+
+ dump_list = []
+
+ # add rows one by one, each as a list, even if only 1 element
+
+ dump_list.append(["challenge execution ID",self.ID])
+ dump_list.append(["challenge execution name",self.name])
+
+        dump_list.append(["challenge definition ID",self.challenge_def_ID])
+        challenge_def = get_indexed_item_from_file(self.challenge_def_ID, FILE_CHALLENGE_DEFINITIONS)
+        dump_list.append(["challenge definition name",challenge_def.name if challenge_def != None else ""])
+
+ if self.start_time != None:
+ dump_list.append(["challenge start time",self.start_time.strftime("%Y-%m-%d %H:%M:%S")])
+ if self.stop_time != None:
+ dump_list.append(["challenge stop time",self.stop_time.strftime("%Y-%m-%d %H:%M:%S")])
+
+ if self.log.length() > 0 :
+ dump_list.append(["Log:"])
+ for item in self.log.get_timestamped_strings():
+ dump_list.append([item])
+
+ if self.CLI_responses.length() > 0 :
+ dump_list.append(["CLI responses:"])
+ for item in self.CLI_responses.get_timestamped_strings():
+ dump_list.append([item])
+
+ if self.API_responses.length() > 0 :
+ dump_list.append(["API responses:"])
+ for item in self.API_responses.get_timestamped_strings():
+ dump_list.append([item])
+
+ try:
+ # output CSV file name: challDefExec + ID + start time + .csv
+ file_name = "challDefExec" + "{0:0=3d}".format(self.challenge_def_ID) + "-" + self.start_time.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
+ with open(file_name, "w", newline="") as file:
+ csv_file_writer = csv.writer(file)
+ csv_file_writer.writerows(dump_list)
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+
+
+######################################################################
+
+class TimeStampedMetricValueList:
+ """This is a utility class for Auto project, for the test execution class (TestExecution).
+ It stores a list of Metric Values (with their respective timestamps).
+ """
+ def __init__ (self):
+ self.__metric_value_list = []
+
+ def append_to_list(self, metric_value_to_append):
+ """Append a metric value (MetricValue) to the list. MetricValue already has a timestamp attribute."""
+ if type(metric_value_to_append)==MetricValue:
+ self.__metric_value_list.append(metric_value_to_append)
+ else:
+ print("appended object must be a MetricValue, metric_value_to_append=",metric_value_to_append)
+ sys.exit() # stop entire program, because metric_value_to_append MUST be correct
+
+ def get_raw_list(self):
+ return self.__metric_value_list
+
+ def get_timestamped_metric_values_as_strings(self):
+ """Return a list of strings with metric values and timestamps as prefixes (not showing microseconds).
+ Also show the metric def ID in parentheses.
+ """
+ ret_list = []
+ i = 0
+ while i < len(self.__metric_value_list):
+ ret_list.append(self.__metric_value_list[i].timestamp.strftime("%Y-%m-%d %H:%M:%S") + " " +
+ str(self.__metric_value_list[i].value) +
+ "(" + str(self.__metric_value_list[i].metric_def_ID) + ")")
+ i += 1
+ return ret_list
+
+ def length(self):
+ return len(self.__metric_value_list)
+
+
+
+######################################################################
+
+class TestExecution(AutoBaseObject):
+ """Class for Auto project, tracking the execution details of a Test Definition,
+ with a method to dump all results to a CSV file.
+ """
+ def __init__ (self, test_exec_ID, test_exec_name,
+ test_exec_testDefID,
+ test_exec_challengeExecID,
+ test_exec_userID):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, test_exec_ID, test_exec_name)
+
+ # specifics for this subclass
+
+ # associated Test Definition (ID)
+ self.test_def_ID = test_exec_testDefID
+ # associated Challenge Execution (ID) (execution instance of a challenge definition); get challenge start time from it;
+ self.challenge_exec_ID = test_exec_challengeExecID
+ # associated User (ID)
+ self.user_ID = test_exec_userID
+
+ # attributes getting values during execution
+
+ # associated Start and Finish times (when test was started and finished)
+ self.start_time = None
+ self.finish_time = None
+ # time when the challenge was started [datetime]; same value as associated ChallengeExecution.start_time;
+ # keep a copy here for print convenience;
+ self.challenge_start_time = None
+ # time when the VNF/service restoration (by ONAP) was detected by the test code [datetime]
+ self.restoration_detection_time = None
+ # key metric: recovery time, defined as time elapsed between start of challenge and restoration detection [timedelta]
+ self.recovery_time = None
+ # list of associated metric values
+ self.associated_metric_values = TimeStampedMetricValueList()
+ # log: list of strings, to capture any interesting or significant event
+ self.log = TimeStampedStringList()
+ # list of CLI responses
+ self.CLI_responses = TimeStampedStringList()
+ # list of API responses (convert to strings)
+ self.API_responses = TimeStampedStringList()
+
+
+ def write_to_csv(self):
+ """Generic function to dump all Test Execution data in a CSV file."""
+
+ dump_list = []
+
+ # add rows one by one, each as a list, even if only 1 element
+
+ dump_list.append(["test execution ID",self.ID])
+ dump_list.append(["test execution name",self.name])
+
+        dump_list.append(["test definition ID",self.test_def_ID])
+        test_def = get_indexed_item_from_file(self.test_def_ID, FILE_TEST_DEFINITIONS)
+        dump_list.append(["test definition name",test_def.name if test_def != None else ""])
+
+ dump_list.append(["associated challenge execution ID",self.challenge_exec_ID])
+ dump_list.append(["user ID",self.user_ID])
+
+ if self.start_time != None:
+ dump_list.append(["test start time",self.start_time.strftime("%Y-%m-%d %H:%M:%S")])
+
+ if self.finish_time != None:
+ dump_list.append(["test finish time",self.finish_time.strftime("%Y-%m-%d %H:%M:%S")])
+
+ if self.challenge_start_time != None:
+            dump_list.append(["challenge start time",self.challenge_start_time.strftime("%Y-%m-%d %H:%M:%S")])
+ if self.restoration_detection_time != None:
+ dump_list.append(["restoration detection time",self.restoration_detection_time.strftime("%Y-%m-%d %H:%M:%S")])
+ if self.recovery_time != None:
+ if self.recovery_time.value != None:
+ if type(self.recovery_time.value)==timedelta:
+ # timedelta: days and seconds are attributes, total_seconds() is a method
+ dump_list.append(["MEASURED RECOVERY TIME (s)",self.recovery_time.value.total_seconds()])
+ rtday = self.recovery_time.value.days
+ rthrs = self.recovery_time.value.seconds // 3600
+ rtmin = (self.recovery_time.value.seconds % 3600) // 60
+ rtsec = self.recovery_time.value.seconds % 60
+ rtmil = self.recovery_time.value.microseconds
+ dump_list.append(["MEASURED RECOVERY TIME (days, hours, mins, seconds, microseconds)",
+ rtday, rthrs, rtmin, rtsec, rtmil])
+
+ if self.associated_metric_values.length() > 0 :
+ dump_list.append(["Metric Values:"])
+ for item in self.associated_metric_values.get_timestamped_metric_values_as_strings():
+ dump_list.append([item])
+
+ if self.log.length() > 0 :
+ dump_list.append(["Log:"])
+ for item in self.log.get_timestamped_strings():
+ dump_list.append([item])
+
+ if self.CLI_responses.length() > 0 :
+ dump_list.append(["CLI responses:"])
+ for item in self.CLI_responses.get_timestamped_strings():
+ dump_list.append([item])
+
+ if self.API_responses.length() > 0 :
+ dump_list.append(["API responses:"])
+ for item in self.API_responses.get_timestamped_strings():
+ dump_list.append([item])
+
+ try:
+ # output CSV file name: testDefExec + ID + start time + .csv
+ file_name = "testDefExec" + "{0:0=3d}".format(self.test_def_ID) + "-" + self.start_time.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
+ with open(file_name, "w", newline="") as file:
+ csv_file_writer = csv.writer(file)
+ csv_file_writer.writerows(dump_list)
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+
+######################################################################
+def dump_all_binaries_to_CSV():
+ """Get all content from all binary files, and dump everything in a snapshot CSV file."""
+ ## TODO
+ timenow = datetime.now()
+
+
+######################################################################
+def main():
+
+ tcs = init_test_cases()
+ print(tcs)
+
+ test_case_ID = 33
+ test_case_name = "auto-resiliency-xyz"
+ test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-400"
+ add_test_case_to_file(test_case_ID, test_case_name, test_case_JIRA_URL)
+ print(read_list_bin(FILE_TEST_CASES))
+
+ print(get_indexed_item_from_file(3,FILE_TEST_CASES))
+ print(get_indexed_item_from_file(257,FILE_TEST_CASES))
+
+ print("tcs[4]=",tcs[4])
+ print(tcs[4].ID)
+ print(tcs[4].name)
+ print(tcs[4].JIRA_URL)
+
+ print()
+
+ tds = init_test_definitions()
+ print(tds)
+ td = get_indexed_item_from_file(1,FILE_TEST_DEFINITIONS)
+ print(td)
+
+ print()
+
+ rcps = init_recipients()
+ print(rcps)
+ rcp = get_indexed_item_from_file(1,FILE_RECIPIENTS)
+ print(rcp)
+
+ print()
+
+ challgs = init_challenge_definitions()
+ print(challgs)
+ chall = get_indexed_item_from_file(1,FILE_CHALLENGE_DEFINITIONS)
+ print(chall)
+
+ print()
+
+ metricdefs = init_metric_definitions()
+ print(metricdefs)
+
+ metricdef = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS)
+ print(metricdef)
+ t1 = datetime(2018,4,1,15,10,12,500000)
+ t2 = datetime(2018,4,1,15,13,43,200000)
+ r1 = metricdef.compute(t1,t2)
+ print(r1)
+ print()
+
+ metricdef = get_indexed_item_from_file(2,FILE_METRIC_DEFINITIONS)
+ print(metricdef)
+ r1 = metricdef.compute(735, 1000, 20)
+ r2 = metricdef.compute(980, 1000, 20)
+ r3 = metricdef.compute(920.0, 1000.0, 0.0)
+ r4 = metricdef.compute(920.0, 1500.0, 500.0)
+ r5 = metricdef.compute(919.99999, 1000.0, 0.000001)
+ print(r1)
+ print(r2)
+ print(r3)
+ print(r4)
+ print(r5)
+
+ print()
+
+ physRs = init_physical_resources()
+ print(physRs)
+ physR = get_indexed_item_from_file(1,FILE_PHYSICAL_RESOURCES)
+ print(physR)
+
+ print()
+
+ cloudRs = init_cloud_virtual_resources()
+ print(cloudRs)
+ cloudR = get_indexed_item_from_file(1,FILE_CLOUD_RESOURCES)
+ print(cloudR)
+
+ print()
+
+ VNFs = init_VNFs_Services()
+ print(VNFs)
+ VNF = get_indexed_item_from_file(1,FILE_VNFS_SERVICES)
+ print(VNF)
+
+ print()
+
+ ce1 = ChallengeExecution(1,"essai challenge execution",1)
+ ce1.start_time = datetime.now()
+ ce1.log.append_to_list("challenge execution log event 1")
+ ce1.log.append_to_list("challenge execution log event 2")
+ ce1.CLI_responses.append_to_list("challenge execution CLI response 1")
+ ce1.log.append_to_list("challenge execution log event 3")
+ ce1.CLI_responses.append_to_list("challenge execution CLI response 2")
+ ce1.log.append_to_list("challenge execution log event 4")
+ ce1.log.append_to_list("challenge execution log event 5")
+ ce1.API_responses.append_to_list("challenge execution API response 1")
+ ce1.log.append_to_list("challenge execution log event 6")
+ print("log length: ", ce1.log.length())
+ print(ce1.log.get_timestamped_strings())
+ print("CLI_responses length: ", ce1.CLI_responses.length())
+ print(ce1.CLI_responses.get_timestamped_strings())
+ print("API_responses length: ", ce1.API_responses.length())
+ print(ce1.API_responses.get_timestamped_strings())
+ ce1.stop_time = datetime.now()
+ ce1.write_to_csv()
+
+ print()
+
+ te1 = TestExecution(1,"essai test execution",1,1,"Gerard")
+ te1.start_time = datetime.now()
+ te1.challenge_start_time = ce1.start_time # illustrate how to set test execution challenge start time
+ print("te1.challenge_start_time:",te1.challenge_start_time)
+
+ te1.log.append_to_list("test execution log event 1")
+ te1.log.append_to_list("test execution log event 2")
+ te1.CLI_responses.append_to_list("test execution CLI response 1")
+ te1.CLI_responses.append_to_list("test execution CLI response 2")
+
+ metricdef = get_indexed_item_from_file(2,FILE_METRIC_DEFINITIONS) # get a metric definition, some ID
+ print(metricdef)
+ r1 = metricdef.compute(735, 1000, 20) # compute a metric value
+ print(r1)
+ te1.associated_metric_values.append_to_list(r1) # append a measured metric value to test execution
+ r1 = metricdef.compute(915, 1000, 20) # compute a metric value
+ print(r1)
+ te1.associated_metric_values.append_to_list(r1) # append a measured metric value to test execution
+
+ te1.log.append_to_list("test execution log event 3")
+ te1.API_responses.append_to_list("test execution API response 1")
+
+ print("log length: ", te1.log.length())
+ print(te1.log.get_timestamped_strings())
+ print("CLI_responses length: ", te1.CLI_responses.length())
+ print(te1.CLI_responses.get_timestamped_strings())
+ print("API_responses length: ", te1.API_responses.length())
+ print(te1.API_responses.get_timestamped_strings())
+ print("associated_metric_values length: ", te1.associated_metric_values.length())
+ print(te1.associated_metric_values.get_timestamped_metric_values_as_strings())
+
+ te1.restoration_detection_time = datetime.now()
+ print("te1.restoration_detection_time:",te1.restoration_detection_time)
+ metricdef = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS) # get Recovery Time metric definition: ID=1
+ print(metricdef)
+ r1 = metricdef.compute(te1.challenge_start_time, te1.restoration_detection_time) # compute a metric value, for Recovery time
+ te1.recovery_time = r1 # assignment could be direct, i.e. te1.recovery_time = metricdef.compute(...)
+
+ te1.finish_time = datetime.now() # test execution is finished
+ te1.write_to_csv()
+
+ print()
+
+ print("\nCiao")
+
+if __name__ == "__main__":
+ main()
+
+
+
+
+
+
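Note: dump_all_binaries_to_CSV() above is left as a TODO. One possible way to complete it, reusing this module's own read_list_bin(), FILE_* constants and csv import, is sketched here (illustrative only, not part of this commit):

# Illustrative sketch only -- not part of this commit: one way the TODO in
# dump_all_binaries_to_CSV() could be completed, using the module's own helpers.
def dump_all_binaries_to_CSV():
    """Get all content from all binary files, and dump everything in a snapshot CSV file."""
    timenow = datetime.now()
    file_name = "snapshot-" + timenow.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
    dump_list = []
    for bin_file in [FILE_PHYSICAL_RESOURCES, FILE_CLOUD_RESOURCES, FILE_VNFS_SERVICES,
                     FILE_RECIPIENTS, FILE_TEST_CASES, FILE_METRIC_DEFINITIONS,
                     FILE_CHALLENGE_DEFINITIONS, FILE_TEST_DEFINITIONS]:
        dump_list.append([bin_file])              # section header row: source file name
        items = read_list_bin(bin_file)
        for item in (items or []):                # read_list_bin may return None if file missing
            dump_list.append([str(item)])
    with open(file_name, "w", newline="") as file:
        csv.writer(file).writerows(dump_list)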
diff --git a/lib/auto/testcase/resiliency/AutoResilRunTest.py b/lib/auto/testcase/resiliency/AutoResilRunTest.py
new file mode 100644
index 0000000..2ceba40
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilRunTest.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: execution of tests
+# (might merge this module with Main module)
+## receive/retrieve chosen test definition info
+## pre-test checks (pings, etc.)
+## launch test:
+##     create execution instances of Test and Challenge
+##     simulate challenge
+##     get time T1
+##     loop:
+##         wait for VNF recovery
+##         optional other metrics
+##         store data and logs
+##     get time T2
+##     stop challenge
+##     reset (with ONAP MSO)
+##     store data and logs
+## post-test checks
+## logs
+
+
+
+def f1():
+ return 0
+
+
+
+
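Editor's note: the comment outline above describes the intended run-test flow around two timestamps, T1 (challenge start) and T2 (restoration detected). The module itself still only contains the placeholder f1(); the skeleton below is a hedged illustration of that flow, and every helper name in it (run_test, start_challenge, vnf_is_recovered, ...) is hypothetical.

    # Hypothetical skeleton of the flow described in the module comments;
    # none of these helpers exist yet in AutoResilRunTest.py.
    import time
    from datetime import datetime

    def run_test(test_def):
        pre_test_checks(test_def)              # pings, environment sanity
        challenge = start_challenge(test_def)  # simulate the challenge
        t1 = datetime.now()                    # T1: challenge start
        while not vnf_is_recovered(test_def):  # wait for VNF recovery
            collect_optional_metrics(test_def)
            store_data_and_logs(test_def)
            time.sleep(1)
        t2 = datetime.now()                    # T2: restoration detected
        stop_challenge(challenge)
        reset_environment(test_def)            # e.g. via ONAP MSO
        store_data_and_logs(test_def)
        post_test_checks(test_def)
        return (t2 - t1).total_seconds()       # recovery time in seconds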
diff --git a/lib/auto/testcase/vnf/vbng/MANIFEST.json b/lib/auto/testcase/vnf/vbng/MANIFEST.json
new file mode 100644
index 0000000..0b34111
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbng/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vbng.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vbng.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
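Editor's note: each VNF directory in this patch ships the same MANIFEST.json shape: a HEAT entry for the base template, with its HEAT_ENV environment file nested under data. A small sketch of walking that structure follows; the path and variable names are illustrative only.

    import json

    # Illustrative only: the path and names below are examples, not project code.
    with open("lib/auto/testcase/vnf/vbng/MANIFEST.json") as manifest_file:
        manifest = json.load(manifest_file)

    for entry in manifest["data"]:
        if entry["type"] == "HEAT":
            template_file = entry["file"]  # base_vcpe_vbng.yaml
            env_files = [nested["file"]
                         for nested in entry.get("data", [])
                         if nested["type"] == "HEAT_ENV"]  # ["base_vcpe_vbng.env"]
            print(template_file, env_files)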
diff --git a/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env
new file mode 100644
index 0000000..be4f972
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env
@@ -0,0 +1,35 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ brgemu_bng_private_net_id: zdfw1bngin01_private
+ brgemu_bng_private_subnet_id: zdfw1bngin01_sub_private
+ bng_gmux_private_net_id: zdfw1bngmux01_private
+ bng_gmux_private_subnet_id: zdfw1bngmux01_sub_private
+ onap_private_net_id: PUT THE ONAP PRIVATE NETWORK NAME HERE
+ onap_private_subnet_id: PUT THE ONAP PRIVATE SUBNETWORK NAME HERE
+ onap_private_net_cidr: 10.0.0.0/16
+ cpe_signal_net_id: zdfw1cpe01_private
+ cpe_signal_subnet_id: zdfw1cpe01_sub_private
+ brgemu_bng_private_net_cidr: 10.3.0.0/24
+ bng_gmux_private_net_cidr: 10.1.0.0/24
+ cpe_signal_private_net_cidr: 10.4.0.0/24
+ vbng_private_ip_0: 10.3.0.1
+ vbng_private_ip_1: 10.0.101.10
+ vbng_private_ip_2: 10.4.0.3
+ vbng_private_ip_3: 10.1.0.10
+ vbng_name_0: zdcpe1cpe01bng01
+ vnf_id: vCPE_Infrastructure_Metro_vBNG_demo_app
+ vf_module_id: vCPE_Intrastructure_Metro_vBNG
+ dcae_collector_ip: 10.0.4.102
+ dcae_collector_port: 8080
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vbng_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-radius-client-for-vbng/src/patches/Vpp-Integrate-FreeRADIUS-Client-for-vBNG.patch
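Editor's note: once the PUT ... HERE placeholders in this environment file are filled in, the template/environment pair can be handed to Heat. The sketch below is a hedged example using python-heatclient via the get_heat_client() helper added later in this patch; the stack name and the exact set of create() keyword arguments are assumptions.

    # Sketch only: assumes a sourced openrc and that heatclient accepts the
    # template and environment bodies through these keyword arguments.
    from auto.util.openstack_lib import get_credentials, get_heat_client

    creds = get_credentials()
    heat = get_heat_client(creds)

    with open("base_vcpe_vbng.yaml") as f:
        template_body = f.read()
    with open("base_vcpe_vbng.env") as f:
        environment_body = f.read()

    heat.stacks.create(stack_name="vcpe_vbng",  # stack name is illustrative
                       template=template_body,
                       environment=environment_body)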
diff --git a/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml
new file mode 100644
index 0000000..3dd7ca0
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml
@@ -0,0 +1,288 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE virtual Broadband Network Gateway (vBNG)
+
+##############
+# #
+# PARAMETERS #
+# #
+##############
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ brgemu_bng_private_net_id:
+ type: string
+ label: vBNG IN private network name or ID
+ description: Private network that connects vBRG to vBNG
+ brgemu_bng_private_subnet_id:
+ type: string
+ label: vBNG IN private sub-network name or ID
+ description: vBNG IN private sub-network name or ID
+ brgemu_bng_private_net_cidr:
+ type: string
+ label: vBNG IN private network CIDR
+ description: The CIDR of the input side of vBNG private network
+ bng_gmux_private_net_id:
+ type: string
+ label: vBNG vGMUX private network name or ID
+ description: Private network that connects vBNG to vGMUX
+ bng_gmux_private_subnet_id:
+ type: string
+ label: vBNG vGMUX private sub-network name or ID
+ description: vBNG vGMUX private sub-network name or ID
+ bng_gmux_private_net_cidr:
+ type: string
+ label: vGMUX private network CIDR
+ description: The CIDR of the input side of vGMUX private network
+ onap_private_net_id:
+ type: string
+ label: ONAP management network name or ID
+ description: Private network that connects ONAP components and the VNF
+ onap_private_subnet_id:
+ type: string
+ label: ONAP management sub-network name or ID
+ description: Private sub-network that connects ONAP components and the VNF
+ onap_private_net_cidr:
+ type: string
+ label: ONAP private network CIDR
+ description: The CIDR of the protected private network
+ cpe_signal_net_id:
+ type: string
+ label: vCPE private network name or ID
+ description: Private network that connects vCPE elements with vCPE infrastructure elements
+ cpe_signal_subnet_id:
+ type: string
+ label: vCPE private sub-network name or ID
+ description: vCPE private sub-network name or ID
+ cpe_signal_private_net_cidr:
+ type: string
+ label: vAAA private network CIDR
+ description: The CIDR of the vAAA private network
+ vbng_private_ip_0:
+ type: string
+ label: vBNG IN private IP address
+ description: Private IP address that is assigned to the vBNG IN
+ vbng_private_ip_1:
+ type: string
+ label: vBNG private IP address towards the ONAP management network
+ description: Private IP address that is assigned to the vBNG to communicate with ONAP components
+ vbng_private_ip_2:
+ type: string
+ label: vBNG to CPE_SIGNAL private IP address
+ description: Private IP address that is assigned to the vBNG in the CPE_SIGNAL network
+ vbng_private_ip_3:
+ type: string
+ label: vBNG to vGMUX private IP address
+ description: Private IP address that is assigned to the vBNG to vGMUX port
+ vbng_name_0:
+ type: string
+ label: vBNG name
+ description: Name of the vBNG
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ dcae_collector_ip:
+ type: string
+ label: DCAE collector IP address
+ description: IP address of the DCAE collector
+ dcae_collector_port:
+ type: string
+ label: DCAE collector port
+ description: Port of the DCAE collector
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vCPE demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ vpp_patch_url:
+ type: string
+ label: VPP Patch URL
+ description: URL for VPP patch for vBNG
+
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+
+ # Virtual BNG Instantiation
+ vbng_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: brgemu_bng_private_net_id }
+ fixed_ips: [{"subnet": { get_param: brgemu_bng_private_subnet_id }, "ip_address": { get_param: vbng_private_ip_0 }}]
+
+ vbng_private_1_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: onap_private_net_id }
+ fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vbng_private_ip_1 }}]
+
+ vbng_private_2_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: cpe_signal_net_id }
+ fixed_ips: [{"subnet": { get_param: cpe_signal_subnet_id }, "ip_address": { get_param: vbng_private_ip_2 }}]
+
+ vbng_private_3_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: bng_gmux_private_net_id }
+ fixed_ips: [{"subnet": { get_param: bng_gmux_private_subnet_id }, "ip_address": { get_param: vbng_private_ip_3 }}]
+
+ vbng_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vbng_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vbng_private_0_port }
+ - port: { get_resource: vbng_private_1_port }
+ - port: { get_resource: vbng_private_2_port }
+ - port: { get_resource: vbng_private_3_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __oam_ipaddr__: { get_param: vbng_private_ip_1 }
+ __brgemu_bng_net_ipaddr__: { get_param: vbng_private_ip_0 }
+ __cpe_signal_net_ipaddr__: { get_param: vbng_private_ip_2 }
+ __bng_gmux_net_ipaddr__: { get_param: vbng_private_ip_3 }
+ __oam_cidr__: { get_param: onap_private_net_cidr }
+ __brgemu_bng_cidr__: { get_param: brgemu_bng_private_net_cidr }
+ __cpe_signal_cidr__: { get_param: cpe_signal_private_net_cidr }
+ __bng_gmux_cidr__: { get_param: bng_gmux_private_net_cidr }
+ __dcae_collector_ip__: { get_param: dcae_collector_ip }
+ __dcae_collector_port__: { get_param: dcae_collector_port }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __vpp_patch_url__ : { get_param: vpp_patch_url }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ echo "__brgemu_bng_net_ipaddr__" > /opt/config/brgemu_bng_net_ipaddr.txt
+ echo "__cpe_signal_net_ipaddr__" > /opt/config/cpe_signal_net_ipaddr.txt
+ echo "__bng_gmux_net_ipaddr__" > /opt/config/bng_gmux_net_ipaddr.txt
+ echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt
+ echo "__oam_cidr__" > /opt/config/oam_cidr.txt
+ echo "__bng_gmux_cidr__" > /opt/config/bng_gmux_net_cidr.txt
+ echo "__cpe_signal_cidr__" > /opt/config/cpe_signal_net_cidr.txt
+ echo "__brgemu_bng_cidr__" > /opt/config/brgemu_bng_net_cidr.txt
+ echo "__dcae_collector_ip__" > /opt/config/dcae_collector_ip.txt
+ echo "__dcae_collector_port__" > /opt/config/dcae_collector_port.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__vpp_patch_url__" > /opt/config/vpp_patch_url.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_bng_install.sh -o /opt/v_bng_install.sh
+ cd /opt
+ chmod +x v_bng_install.sh
+ ./v_bng_install.sh
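Editor's note: the user_data block above relies on Heat's str_replace intrinsic: every __name__ placeholder in the embedded bash script is replaced by the corresponding parameter value before cloud-init executes the script on first boot. The toy sketch below only illustrates that substitution; the values are made up.

    # Toy illustration of what OS::Heat str_replace does with the params mapping.
    params = {
        "__oam_ipaddr__": "10.0.101.10",   # made-up value
        "__cloud_env__": "openstack",      # made-up value
    }
    template = ('echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt\n'
                'echo "__cloud_env__" > /opt/config/cloud_env.txt\n')
    user_data = template
    for placeholder, value in params.items():
        user_data = user_data.replace(placeholder, value)
    print(user_data)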
diff --git a/lib/auto/testcase/vnf/vbrgemu/MANIFEST.json b/lib/auto/testcase/vnf/vbrgemu/MANIFEST.json
new file mode 100644
index 0000000..3911256
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbrgemu/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vbrgemu.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vbrgemu.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env
new file mode 100644
index 0000000..7719f55
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env
@@ -0,0 +1,28 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 or vbrg-base-ubuntu-16-04 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ compile_state: PUT THE COMPILE STATE (done, auto or build)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ vbrgemu_bng_private_net_id: zdfw1bngin01_private
+ vbrgemu_bng_private_subnet_id: zdfw1bngin01_sub_private
+ vbrgemu_bng_private_net_cidr: 10.3.0.0/24
+ #vbrgemu_private_net_id: zdfw1vbrgemu01_private
+ #vbrgemu_private_net_cidr: 192.168.1.0/24
+ vbrgemu_private_ip_0: 10.3.0.4
+ #vbrgemu_private_ip_1: 192.168.1.1
+ sdnc_ip: 10.0.7.1
+ vbrgemu_name_0: zdcpe1cpe01brgemu01
+ vnf_id: vCPE_Infrastructure_BGREMU_demo_app
+ vf_module_id: vCPE_Customer_BRGEMU
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vbrgemu_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ hc2vpp_source_repo_url: https://gerrit.fd.io/r/hc2vpp
+ hc2vpp_source_repo_branch: stable/1704
+ vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-option-82-for-vbrg/src/patches/VPP-Add-Option82-Nat-Filter-For-vBRG.patch
diff --git a/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml
new file mode 100644
index 0000000..a786995
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml
@@ -0,0 +1,253 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE vBRG Emulator (vBRGEMU)
+
+#######################################################################
+# #
+# PARAMETERS #
+# #
+# 0_port should get IP address from DHCP discover through vBNG #
+# DCAE is not monitoring the BRGEMULATOR #
+#######################################################################
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ vbrgemu_bng_private_net_id:
+ type: string
+ label: vBNG private network name or ID
+ description: Private network that connects vBRGEMU to vBNG
+ vbrgemu_bng_private_subnet_id:
+ type: string
+ label: vBNG private sub-network name or ID
+ description: vBNG private sub-network name or ID
+ vbrgemu_bng_private_net_cidr:
+ type: string
+ label: vBNG IN private network CIDR
+ description: The CIDR of the input side of vBNG private network
+ # vbrgemu_private_net_id:
+ # type: string
+ # label: vBRGEMU Home private network name or ID
+ # description: Private network that connects vBRGEMU to local devices
+ #vbrgemu_private_net_cidr:
+ # type: string
+ # label: vBRGEMU Home private network CIDR
+ # description: The CIDR of the input side of vBRGEMU Home private network
+ vbrgemu_private_ip_0:
+ type: string
+ label: vBRGEMU private IP address
+ description: Private IP address towards the BRGEMU-BNG network
+ #vbrgemu_private_ip_1:
+ # type: string
+ # label: vGW private IP address
+ # description: Private IP address towards the BRGEMU private network
+ vbrgemu_name_0:
+ type: string
+ label: vBRGEMU name
+ description: Name of the vBRGEMU
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vCPE demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ hc2vpp_source_repo_url:
+ type: string
+ label: Honeycomb Source Git Repo
+ description: URL for Honeycomb source codes
+ hc2vpp_source_repo_branch:
+ type: string
+ label: Honeycomb Source Git Branch
+ description: Git Branch for the Honeycomb source codes
+ vpp_patch_url:
+ type: string
+ label: VPP Patch URL
+ description: URL for VPP patch for vBRG Emulator
+ sdnc_ip:
+ type: string
+ label: SDNC IP address
+ description: SDNC IP address used to set up NAT
+ compile_state:
+ type: string
+ label: Compile State
+ description: Whether to compile the code (done, auto or build)
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+ #vbrgemu_private_network:
+ # type: OS::Neutron::Net
+ # properties:
+ # name: { get_param: vbrgemu_private_net_id }
+
+ #vbrgemu_private_subnet:
+ # type: OS::Neutron::Subnet
+ # properties:
+ # name: { get_param: vbrgemu_private_net_id }
+ # network_id: { get_resource: vbrgemu_private_network }
+ # cidr: { get_param: vbrgemu_private_net_cidr }
+
+ # Virtual BRG Emulator Instantiation
+ # 0_port should get IP address from DHCP discover through vBNG once the VNF is running
+ vbrgemu_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: vbrgemu_bng_private_net_id }
+ fixed_ips: [{"subnet": { get_param: vbrgemu_bng_private_subnet_id }, "ip_address": { get_param: vbrgemu_private_ip_0 }}]
+
+ #vbrgemu_private_1_port:
+ # type: OS::Neutron::Port
+ # properties:
+ # network: { get_resource: vbrgemu_private_network }
+ # fixed_ips: [{"subnet": { get_resource: vbrgemu_private_subnet }, "ip_address": { get_param: vbrgemu_private_ip_1 }}]
+
+ vbrgemu_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vbrgemu_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vbrgemu_private_0_port }
+ #- port: { get_resource: vbrgemu_private_1_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ # __brgemu_net_ipaddr__: { get_param: vbrgemu_private_ip_1 }
+ # __brgemu_cidr__: { get_param: vbrgemu_private_net_cidr }
+ __brgemu_bng_private_net_cidr__: { get_param: vbrgemu_bng_private_net_cidr }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __hc2vpp_source_repo_url__ : { get_param: hc2vpp_source_repo_url }
+ __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
+ __vpp_patch_url__ : { get_param: vpp_patch_url }
+ __sdnc_ip__ : { get_param: sdnc_ip }
+ __compile_state__ : { get_param: compile_state }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ #echo "__brgemu_net_ipaddr__" > /opt/config/brgemu_net_ipaddr.txt
+ #echo "__brgemu_cidr__" > /opt/config/brgemu_net_cidr.txt
+ echo "__brgemu_bng_private_net_cidr__" > /opt/config/brgemu_bng_private_net_cidr.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
+ echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
+ echo "__vpp_patch_url__" > /opt/config/vpp_patch_url.txt
+ echo "__sdnc_ip__" > /opt/config/sdnc_ip.txt
+ echo "__compile_state__" > /opt/config/compile_state.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_brgemu_install.sh -o /opt/v_brgemu_install.sh
+ cd /opt
+ chmod +x v_brgemu_install.sh
+ ./v_brgemu_install.sh
diff --git a/lib/auto/testcase/vnf/vgmux/MANIFEST.json b/lib/auto/testcase/vnf/vgmux/MANIFEST.json
new file mode 100644
index 0000000..1f62167
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgmux/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vgmux.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vgmux.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env
new file mode 100644
index 0000000..e81afa7
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env
@@ -0,0 +1,35 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ bng_gmux_private_net_id: zdfw1bngmux01_private
+ bng_gmux_private_subnet_id: zdfw1bngmux01_sub_private
+ mux_gw_private_net_id: zdfw1muxgw01_private
+ mux_gw_private_subnet_id: zdfw1muxgw01_sub_private
+ onap_private_net_id: PUT THE ONAP PRIVATE NETWORK NAME HERE
+ onap_private_subnet_id: PUT THE ONAP PRIVATE SUBNETWORK NAME HERE
+ onap_private_net_cidr: 10.0.0.0/16
+ bng_gmux_private_net_cidr: 10.1.0.0/24
+ mux_gw_private_net_cidr: 10.5.0.0/24
+ vgmux_private_ip_0: 10.1.0.20
+ vgmux_private_ip_1: 10.0.101.20
+ vgmux_private_ip_2: 10.5.0.20
+ vgmux_name_0: zdcpe1cpe01mux01
+ vnf_id: vCPE_Infrastructure_vGMUX_demo_app
+ vf_module_id: vCPE_Intrastructure_Metro_vGMUX
+ dcae_collector_ip: 10.0.4.102
+ dcae_collector_port: 8080
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vgmux_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ hc2vpp_source_repo_url: https://gerrit.fd.io/r/hc2vpp
+ hc2vpp_source_repo_branch: stable/1704
+ vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/Vpp-Add-VES-agent-for-vG-MUX.patch
+ hc2vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/Hc2vpp-Add-VES-agent-for-vG-MUX.patch
+ libevel_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/vCPE-vG-MUX-libevel-fixup.patch
diff --git a/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml
new file mode 100644
index 0000000..ecdb1b1
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml
@@ -0,0 +1,281 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE Infrastructure Metro vGMUX
+
+##############
+# #
+# PARAMETERS #
+# #
+##############
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ bng_gmux_private_net_id:
+ type: string
+ label: vBNG vGMUX private network name or ID
+ description: Private network that connects vBNG to vGMUX
+ bng_gmux_private_subnet_id:
+ type: string
+ label: vBNG vGMUX private sub-network name or ID
+ description: vBNG vGMUX private sub-network name or ID
+ bng_gmux_private_net_cidr:
+ type: string
+ label: vBNG vGMUX private network CIDR
+ description: The CIDR of the vBNG-vGMUX private network
+ mux_gw_private_net_id:
+ type: string
+ label: vGMUX vGWs network name or ID
+ description: Private network that connects vGMUX to vGWs
+ mux_gw_private_subnet_id:
+ type: string
+ label: vGMUX vGWs sub-network name or ID
+ description: vGMUX vGWs sub-network name or ID
+ mux_gw_private_net_cidr:
+ type: string
+ label: vGMUX private network CIDR
+ description: The CIDR of the vGMUX private network
+ onap_private_net_id:
+ type: string
+ label: ONAP management network name or ID
+ description: Private network that connects ONAP components and the VNF
+ onap_private_subnet_id:
+ type: string
+ label: ONAP management sub-network name or ID
+ description: Private sub-network that connects ONAP components and the VNF
+ onap_private_net_cidr:
+ type: string
+ label: ONAP private network CIDR
+ description: The CIDR of the protected private network
+ vgmux_private_ip_0:
+ type: string
+ label: vGMUX private IP address towards the vBNG-vGMUX private network
+ description: Private IP address that is assigned to the vGMUX to communicate with the vBNG
+ vgmux_private_ip_1:
+ type: string
+ label: vGMUX private IP address towards the ONAP management network
+ description: Private IP address that is assigned to the vGMUX to communicate with ONAP components
+ vgmux_private_ip_2:
+ type: string
+ label: vGMUX private IP address towards the vGMUX-vGW private network
+ description: Private IP address that is assigned to the vGMUX to communicate with vGWs
+ vgmux_name_0:
+ type: string
+ label: vGMUX name
+ description: Name of the vGMUX
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ dcae_collector_ip:
+ type: string
+ label: DCAE collector IP address
+ description: IP address of the DCAE collector
+ dcae_collector_port:
+ type: string
+ label: DCAE collector port
+ description: Port of the DCAE collector
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vCPE demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ hc2vpp_source_repo_url:
+ type: string
+ label: Honeycomb Source Git Repo
+ description: URL for Honeycomb source codes
+ hc2vpp_source_repo_branch:
+ type: string
+ label: Honeycomb Source Git Branch
+ description: Git Branch for the Honeycomb source codes
+ vpp_patch_url:
+ type: string
+ label: VPP Patch URL
+ description: URL for VPP patch for vG-MUX
+ hc2vpp_patch_url:
+ type: string
+ label: Honeycomb Patch URL
+ description: URL for Honeycomb patch for vG-MUX
+ libevel_patch_url:
+ type: string
+ label: libevel Patch URL
+ description: URL for libevel patch for vG-MUX
+
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+
+ # Virtual GMUX Instantiation
+ vgmux_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: bng_gmux_private_net_id }
+ fixed_ips: [{"subnet": { get_param: bng_gmux_private_subnet_id }, "ip_address": { get_param: vgmux_private_ip_0 }}]
+
+ vgmux_private_1_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: onap_private_net_id }
+ fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vgmux_private_ip_1 }}]
+
+ vgmux_private_2_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: mux_gw_private_net_id }
+ fixed_ips: [{"subnet": { get_param: mux_gw_private_subnet_id }, "ip_address": { get_param: vgmux_private_ip_2 }}]
+
+ vgmux_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vgmux_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vgmux_private_0_port }
+ - port: { get_resource: vgmux_private_1_port }
+ - port: { get_resource: vgmux_private_2_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __bng_mux_net_ipaddr__ : { get_param: vgmux_private_ip_0 }
+ __oam_ipaddr__ : { get_param: vgmux_private_ip_1 }
+ __mux_gw_net_ipaddr__ : { get_param: vgmux_private_ip_2 }
+ __bng_mux_net_cidr__ : { get_param: bng_gmux_private_net_cidr }
+ __oam_cidr__ : { get_param: onap_private_net_cidr }
+ __mux_gw_net_cidr__ : { get_param: mux_gw_private_net_cidr }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __hc2vpp_source_repo_url__ : { get_param: hc2vpp_source_repo_url }
+ __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
+ __vpp_patch_url__ : { get_param: vpp_patch_url }
+ __hc2vpp_patch_url__ : { get_param: hc2vpp_patch_url }
+ __libevel_patch_url__ : { get_param: libevel_patch_url }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ echo "__bng_mux_net_ipaddr__" > /opt/config/bng_mux_net_ipaddr.txt
+ echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt
+ echo "__mux_gw_net_ipaddr__" > /opt/config/mux_gw_net_ipaddr.txt
+ echo "__bng_mux_net_cidr__" > /opt/config/bng_mux_net_cidr.txt
+ echo "__oam_cidr__" > /opt/config/oam_cidr.txt
+ echo "__mux_gw_net_cidr__" > /opt/config/mux_gw_net_cidr.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__vpp_patch_url__" > /opt/config/vpp_patch_url.txt
+ echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
+ echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
+ echo "__hc2vpp_patch_url__" > /opt/config/hc2vpp_patch_url.txt
+ echo "__libevel_patch_url__" > /opt/config/libevel_patch_url.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_gmux_install.sh -o /opt/v_gmux_install.sh
+ cd /opt
+ chmod +x v_gmux_install.sh
+ ./v_gmux_install.sh
diff --git a/lib/auto/testcase/vnf/vgw/MANIFEST.json b/lib/auto/testcase/vnf/vgw/MANIFEST.json
new file mode 100644
index 0000000..8178b1e
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgw/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vgw.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vgw.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env
new file mode 100644
index 0000000..f1cadb8
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env
@@ -0,0 +1,32 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ mux_gw_private_net_id: zdfw1muxgw01_private
+ mux_gw_private_subnet_id: zdfw1muxgw01_sub_private
+ mux_gw_private_net_cidr: 10.5.0.0/24
+ cpe_public_net_id: zdfw1cpe01_public
+ cpe_public_subnet_id: zdfw1cpe01_sub_public
+ cpe_public_net_cidr: 10.2.0.0/24
+ onap_private_net_id: PUT THE ONAP PRIVATE NETWORK NAME HERE
+ onap_private_subnet_id: PUT THE ONAP PRIVATE SUBNETWORK NAME HERE
+ onap_private_net_cidr: 10.0.0.0/16
+ vgw_private_ip_0: 10.5.0.21
+ vgw_private_ip_1: 10.0.101.30
+ vgw_private_ip_2: 10.2.0.3
+ vgw_name_0: zdcpe1cpe01gw01
+ vnf_id: vCPE_Infrastructure_GW_demo_app
+ vf_module_id: vCPE_Customer_GW
+ dcae_collector_ip: 10.0.4.102
+ dcae_collector_port: 8080
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vgw_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ hc2vpp_source_repo_url: https://gerrit.fd.io/r/hc2vpp
+ hc2vpp_source_repo_branch: stable/1704
diff --git a/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml
new file mode 100644
index 0000000..173ba6d
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml
@@ -0,0 +1,261 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE vGateway (vG)
+
+##############
+# #
+# PARAMETERS #
+# #
+##############
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ mux_gw_private_net_id:
+ type: string
+ label: vGMUX private network name or ID
+ description: Private network that connects vGMUX to vGWs
+ mux_gw_private_subnet_id:
+ type: string
+ label: vGMUX private sub-network name or ID
+ description: vGMUX private sub-network name or ID
+ mux_gw_private_net_cidr:
+ type: string
+ label: vGMUX private network CIDR
+ description: The CIDR of the vGMUX private network
+ onap_private_net_id:
+ type: string
+ label: ONAP management network name or ID
+ description: Private network that connects ONAP components and the VNF
+ onap_private_subnet_id:
+ type: string
+ label: ONAP management sub-network name or ID
+ description: Private sub-network that connects ONAP components and the VNF
+ onap_private_net_cidr:
+ type: string
+ label: ONAP private network CIDR
+ description: The CIDR of the protected private network
+ cpe_public_net_id:
+ type: string
+ label: vCPE public network (emulated internet) name or ID
+ description: Private network that connects vGW to emulated internet
+ cpe_public_subnet_id:
+ type: string
+ label: vCPE Public subnet
+ description: vCPE Public subnet
+ cpe_public_net_cidr:
+ type: string
+ label: vCPE public network CIDR
+ description: The CIDR of the vCPE public network
+ vgw_private_ip_0:
+ type: string
+ label: vGW private IP address towards the vGMUX
+ description: Private IP address that is assigned to the vGW to communicate with vGMUX
+ vgw_private_ip_1:
+ type: string
+ label: vGW private IP address towards the ONAP management network
+ description: Private IP address that is assigned to the vGW to communicate with ONAP components
+ vgw_private_ip_2:
+ type: string
+ label: vGW private IP address towards the vCPE public network
+ description: Private IP address that is assigned to the vGW to communicate with vCPE public network
+ vgw_name_0:
+ type: string
+ label: vGW name
+ description: Name of the vGW
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ dcae_collector_ip:
+ type: string
+ label: DCAE collector IP address
+ description: IP address of the DCAE collector
+ dcae_collector_port:
+ type: string
+ label: DCAE collector port
+ description: Port of the DCAE collector
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vCPE demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ hc2vpp_source_repo_url:
+ type: string
+ label: Honeycomb Source Git Repo
+ description: URL for Honeycomb source codes
+ hc2vpp_source_repo_branch:
+ type: string
+ label: Honeycomb Source Git Branch
+ description: Git Branch for the Honeycomb source codes
+
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+ # Virtual GW Instantiation
+ vgw_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: mux_gw_private_net_id }
+ fixed_ips: [{"subnet": { get_param: mux_gw_private_subnet_id }, "ip_address": { get_param: vgw_private_ip_0 }}]
+
+ vgw_private_1_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: onap_private_net_id }
+ fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vgw_private_ip_1 }}]
+
+ vgw_private_2_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: cpe_public_net_id}
+ fixed_ips: [{"subnet": { get_param: cpe_public_subnet_id }, "ip_address": { get_param: vgw_private_ip_2 }}]
+
+ vgw_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vgw_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vgw_private_0_port }
+ - port: { get_resource: vgw_private_1_port }
+ - port: { get_resource: vgw_private_2_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __mux_gw_private_net_ipaddr__ : { get_param: vgw_private_ip_0 }
+ __oam_ipaddr__ : { get_param: vgw_private_ip_1 }
+ __oam_cidr__ : { get_param: onap_private_net_cidr }
+ __cpe_public_net_cidr__ : { get_param: cpe_public_net_cidr }
+ __mux_gw_private_net_cidr__ : { get_param: mux_gw_private_net_cidr }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __hc2vpp_source_repo_url__ : { get_param: hc2vpp_source_repo_url }
+ __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt
+ echo "__oam_cidr__" > /opt/config/oam_cidr.txt
+ echo "__cpe_public_net_cidr__" > /opt/config/cpe_public_net_cidr.txt
+ echo "__mux_gw_private_net_ipaddr__" > /opt/config/mux_gw_private_net_ipaddr.txt
+ echo "__mux_gw_private_net_cidr__" > /opt/config/mux_gw_private_net_cidr.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
+ echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_gw_install.sh -o /opt/v_gw_install.sh
+ cd /opt
+ chmod +x v_gw_install.sh
+ ./v_gw_install.sh
+
diff --git a/lib/auto/util/__init__.py b/lib/auto/util/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/auto/util/__init__.py
diff --git a/lib/auto/util/openstack_lib.py b/lib/auto/util/openstack_lib.py
new file mode 100644
index 0000000..4b62b72
--- /dev/null
+++ b/lib/auto/util/openstack_lib.py
@@ -0,0 +1,332 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""Module to manage OpenStack"""
+
+import os
+import re
+import sys
+import time
+import traceback
+
+from keystoneauth1 import loading
+from keystoneauth1 import session
+from keystoneclient import client as keystoneclient
+from glanceclient import client as glanceclient
+from neutronclient.neutron import client as neutronclient
+from novaclient import client as novaclient
+from heatclient import client as heatclient
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+DEFAULT_API_VERSION = '2'
+DEFAULT_ORCHESTRATION_API_VERSION = '1'
+
+openrc_base_key = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD']
+
+openrc_v3_exkey = ['OS_PROJECT_NAME',
+ 'OS_USER_DOMAIN_NAME',
+ 'OS_PROJECT_DOMAIN_NAME']
+
+openrc_v2_exkey = ['OS_TENANT_NAME']
+
+openrc_vars_mapping = {
+ 'OS_USERNAME': 'username',
+ 'OS_PASSWORD': 'password',
+ 'OS_AUTH_URL': 'auth_url',
+ 'OS_TENANT_NAME': 'tenant_name',
+ 'OS_USER_DOMAIN_NAME': 'user_domain_name',
+ 'OS_PROJECT_DOMAIN_NAME': 'project_domain_name',
+ 'OS_PROJECT_NAME': 'project_name',
+ }
+
+
+def check_identity_api_version():
+ identity_api_version = os.getenv('OS_IDENTITY_API_VERSION')
+ auth_url = os.getenv('OS_AUTH_URL')
+ if not auth_url:
+ raise RuntimeError("Require env var: OS_AUTH_URL")
+ auth_url_parse = auth_url.split('/')
+ url_tail = auth_url_parse[-1] if auth_url_parse[-1] else auth_url_parse[-2]
+ url_identity_version = url_tail.strip('v')
+ if identity_api_version and \
+ identity_api_version != url_identity_version:
+ raise RuntimeError("identity api version not consistent with OS_AUTH_URL")
+ return url_identity_version
+
+
+def check_image_api_version():
+ image_api_version = os.getenv('OS_IMAGE_API_VERSION')
+ if image_api_version:
+ return image_api_version
+ else:
+ return DEFAULT_API_VERSION
+
+
+def check_network_api_version():
+ network_api_version = os.getenv('OS_NETWORK_API_VERSION')
+ if network_api_version:
+ return network_api_version
+ else:
+ return DEFAULT_API_VERSION
+
+
+def check_compute_api_version():
+ compute_api_version = os.getenv('OS_COMPUTE_API_VERSION')
+ if compute_api_version:
+ return compute_api_version
+ else:
+ return DEFAULT_API_VERSION
+
+
+def check_orchestration_api_version():
+ orchestration_api_version = os.getenv('OS_ORCHESTRATION_API_VERSION')
+ if orchestration_api_version:
+ return orchestration_api_version
+ else:
+ return DEFAULT_ORCHESTRATION_API_VERSION
+
+
+def get_project_name(creds):
+ identity_version = check_identity_api_version()
+ if identity_version == '3':
+ return creds["project_name"]
+ elif identity_version == '2':
+ return creds["tenant_name"]
+ else:
+ raise RuntimeError("Unsupported identity version")
+
+
+def get_credentials():
+ creds = {}
+ creds_env_key = openrc_base_key
+ identity_api_version = check_identity_api_version()
+
+ if identity_api_version == '3':
+ creds_env_key += openrc_v3_exkey
+ elif identity_api_version == '2':
+ creds_env_key += openrc_v2_exkey
+ else:
+ raise RuntimeError("Unsupported identity version")
+
+ for env_key in creds_env_key:
+ env_value = os.getenv(env_key)
+ if env_value is None:
+ raise RuntimeError("Require env var: %s" % env_key)
+ else:
+ creds_var = openrc_vars_mapping.get(env_key)
+ creds.update({creds_var: env_value})
+
+ return creds
+
+
+def get_session_auth(creds):
+ loader = loading.get_plugin_loader('password')
+ auth = loader.load_from_options(**creds)
+ return auth
+
+
+def get_session(creds):
+ auth = get_session_auth(creds)
+ cacert = os.getenv('OS_CACERT')
+ insecure = os.getenv('OS_INSECURE', '').lower() == 'true'
+ verify = cacert if cacert else not insecure
+ return session.Session(auth=auth, verify=verify)
+
+
+def get_keystone_client(creds):
+ identity_api_version = check_identity_api_version()
+ sess = get_session(creds)
+ return keystoneclient.Client(identity_api_version,
+ session=sess,
+ interface=os.getenv('OS_INTERFACE', 'admin'))
+
+
+def get_glance_client(creds):
+ image_api_version = check_image_api_version()
+ sess = get_session(creds)
+ return glanceclient.Client(image_api_version, session=sess)
+
+
+def get_neutron_client(creds):
+ network_api_version = check_network_api_version()
+ sess = get_session(creds)
+ return neutronclient.Client(network_api_version, session=sess)
+
+
+def get_nova_client(creds):
+ compute_api_version = check_compute_api_version()
+ sess = get_session(creds)
+ return novaclient.Client(compute_api_version, session=sess)
+
+
+def get_heat_client(creds):
+ orchestration_api_version = check_orchestration_api_version()
+ sess = get_session(creds)
+ return heatclient.Client(orchestration_api_version, session=sess)
+
+
+def get_domain_id(keystone_client, domain_name):
+ domains = keystone_client.domains.list()
+ domain_id = None
+ for domain in domains:
+ if domain.name == domain_name:
+ domain_id = domain.id
+ break
+ return domain_id
+
+
+def get_project_id(keystone_client, project_name):
+ identity_version = check_identity_api_version()
+ if identity_version == '3':
+ projects = keystone_client.projects.list()
+ elif identity_version == '2':
+ projects = keystone_client.tenants.list()
+ else:
+ raise RuntimeError("Unsupported identity version")
+ project_id = None
+ for project in projects:
+ if project.name == project_name:
+ project_id = project.id
+ break
+ return project_id
+
+
+def get_image_id(glance_client, image_name):
+ images = glance_client.images.list()
+ image_id = None
+ for image in images:
+ if image.name == image_name:
+ image_id = image.id
+ break
+ return image_id
+
+
+def get_network_id(neutron_client, network_name):
+ networks = neutron_client.list_networks()['networks']
+ network_id = None
+ for network in networks:
+ if network['name'] == network_name:
+ network_id = network['id']
+ break
+ return network_id
+
+
+def get_security_group_id(neutron_client, secgroup_name, project_id=None):
+ security_groups = neutron_client.list_security_groups()['security_groups']
+ secgroup_id = None
+ for security_group in security_groups:
+ if security_group['name'] == secgroup_name and \
+ (project_id is None or security_group['project_id'] == project_id):
+ secgroup_id = security_group['id']
+ break
+ return secgroup_id
+
+
+def get_secgroup_rule_id(neutron_client, secgroup_id, json_body):
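+    # Return the id of the first rule whose attributes all match
+    # json_body['security_group_rule'], or None if no rule matches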
+ secgroup_rules = \
+ neutron_client.list_security_group_rules()['security_group_rules']
+ secgroup_rule_id = None
+ for secgroup_rule in secgroup_rules:
+ rule_match = True
+ for key, value in json_body['security_group_rule'].items():
+ rule_match = rule_match and (value == secgroup_rule[key])
+ if rule_match:
+ secgroup_rule_id = secgroup_rule['id']
+ break
+ return secgroup_rule_id
+
+
+def get_keypair_id(nova_client, keypair_name):
+ keypairs = nova_client.keypairs.list()
+ keypair_id = None
+ for keypair in keypairs:
+ if keypair.name == keypair_name:
+ keypair_id = keypair.id
+ break
+ return keypair_id
+
+
+def create_project(keystone_client, creds, project_name, project_desc):
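+    # Reuse an existing project with the same name instead of creating a
+    # duplicate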
+ project_id = get_project_id(keystone_client, project_name)
+ if project_id:
+ return project_id
+
+ identity_version = check_identity_api_version()
+
+ if identity_version == '3':
+ domain_name = creds["user_domain_name"]
+ domain_id = get_domain_id(keystone_client, domain_name)
+ project = keystone_client.projects.create(
+ name=project_name,
+ description=project_desc,
+ domain=domain_id,
+ enabled=True)
+ elif identity_version == '2':
+ project = keystone_client.tenants.create(project_name,
+ project_desc,
+ enabled=True)
+ else:
+ raise RuntimeError("Unsupported identity version")
+
+ return project.id
+
+
+def create_image(glance_client, image_name, image_path, disk_format="qcow2",
+ container_format="bare", visibility="public"):
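+    # Validate the source file, then create and upload the image only if it
+    # is not already registered in Glance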
+ if not os.path.isfile(image_path):
+ raise RuntimeError("Image file not found: %s" % image_path)
+ image_id = get_image_id(glance_client, image_name)
+ if not image_id:
+ image = glance_client.images.create(name=image_name,
+ visibility=visibility,
+ disk_format=disk_format,
+ container_format=container_format)
+ image_id = image.id
+        with open(image_path, 'rb') as image_data:
+ glance_client.images.upload(image_id, image_data)
+ return image_id
+
+
+def create_secgroup_rule(neutron_client, secgroup_id, protocol, direction,
+ port_range_min=None, port_range_max=None):
+ json_body = {'security_group_rule': {'direction': direction,
+ 'security_group_id': secgroup_id,
+ 'protocol': protocol}}
+
+ if bool(port_range_min) != bool(port_range_max):
+ raise RuntimeError("Start or end of protocol range is empty: [ %s, %s ]"
+ % (port_range_min, port_range_max))
+ elif port_range_min and port_range_max:
+ json_body['security_group_rule'].update({'port_range_min':
+ port_range_min})
+ json_body['security_group_rule'].update({'port_range_max':
+ port_range_max})
+
+    secgroup_rule_id = get_secgroup_rule_id(
+        neutron_client, secgroup_id, json_body)
+    if not secgroup_rule_id:
+        # create the rule and return its id rather than None
+        rule = neutron_client.create_security_group_rule(json_body)
+        secgroup_rule_id = rule['security_group_rule']['id']
+    return secgroup_rule_id
+
+
+def update_compute_quota(nova_client, project_id, quotas):
+ nova_client.quotas.update(project_id, **quotas)
+
+
+def create_keypair(nova_client, keypair_name, keypair_path):
+ keypair_id = get_keypair_id(nova_client, keypair_name)
+ if not keypair_id:
+ with open(os.path.expanduser(keypair_path), 'r') as public_key:
+ key_data = public_key.read().decode('utf-8')
+ keypair = nova_client.keypairs.create(name=keypair_name,
+ public_key=key_data)
+ keypair_id = keypair.id
+ return keypair_id
+
diff --git a/lib/auto/util/util.py b/lib/auto/util/util.py
new file mode 100644
index 0000000..0033900
--- /dev/null
+++ b/lib/auto/util/util.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""Utility Module"""
+
+import os
+import git
+import urllib
+import yaml
+import traceback
+from Crypto.PublicKey import RSA
+from yaml_type import literal_unicode
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+
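+# YAML representers: emit strings as folded ('>') or literal ('|') block
+# scalars, or as plain scalars for unicode objects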
+def folded_unicode_representer(dumper, data):
+ return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='>')
+
+
+def literal_unicode_representer(dumper, data):
+ return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
+
+
+def unicode_representer(dumper, uni):
+ node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
+ return node
+
+
+def mkdir(path):
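+    # Create the directory (and any missing parents); return True if it was
+    # created, False if it already existed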
+ path = path.strip()
+ path = path.rstrip("\\")
+ isExist = os.path.exists(path)
+ if not isExist:
+ os.makedirs(path)
+ return True
+ else:
+ return False
+
+
+def download(url, file_path):
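+    # Fetch url into file_path unless the file already exists; return True
+    # only when a download actually happened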
+ if os.path.exists(file_path):
+ return False
+ else:
+ urllib.urlretrieve(url, file_path)
+ return True
+
+
+def git_clone(git_repo, git_branch, clone_path):
+ if not os.path.exists(clone_path):
+ git.Repo.clone_from(git_repo, clone_path, branch=git_branch)
+
+
+def read_file(file_path):
+ with open(os.path.expanduser(file_path)) as fd:
+ return fd.read()
+
+
+def read_yaml(yaml_path):
+ with open(os.path.expanduser(yaml_path)) as fd:
+ return yaml.safe_load(fd)
+
+
+def write_yaml(yaml_data, yaml_path, default_style=False):
+ yaml.add_representer(literal_unicode, literal_unicode_representer)
+ yaml.add_representer(unicode, unicode_representer)
+ with open(os.path.expanduser(yaml_path), 'w') as fd:
+ return yaml.dump(yaml_data, fd,
+ default_flow_style=default_style)
+
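+# Example (sketch, not part of the module API): wrapping a multi-line string
+# in literal_unicode makes write_yaml() emit it as a '|' block scalar:
+#     data = {'script': literal_unicode(u'#!/bin/sh\necho hello\n')}
+#     write_yaml(data, '~/out.yaml')
+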
+
+def create_keypair(prikey_path, pubkey_path, size=2048):
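+    # Generate an RSA key pair of 'size' bits; the private key is written
+    # with mode 0600 and the public key in OpenSSH format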
+ key = RSA.generate(size)
+ with open(os.path.expanduser(prikey_path), 'w') as prikey_file:
+        os.chmod(os.path.expanduser(prikey_path), 0600)
+ prikey_file.write(key.exportKey('PEM'))
+ pubkey = key.publickey()
+ with open(os.path.expanduser(pubkey_path), 'w') as pubkey_file:
+ pubkey_file.write(pubkey.exportKey('OpenSSH'))
diff --git a/lib/auto/util/yaml_type.py b/lib/auto/util/yaml_type.py
new file mode 100644
index 0000000..352fc7d
--- /dev/null
+++ b/lib/auto/util/yaml_type.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
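+# Marker subclasses of unicode used by util.py's YAML representers to emit
+# folded ('>') and literal ('|') block scalars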
+class folded_unicode(unicode):
+    pass
+
+
+class literal_unicode(unicode):
+    pass