author    Morgan Richomme <morgan.richomme@orange.com>    2016-11-08 14:18:12 +0100
committer Morgan Richomme <morgan.richomme@orange.com>    2016-11-09 16:55:45 +0100
commit    107e61635c2ab1feb5263380ea63e21cf2e6e65b (patch)
tree      4966b77605bd34a40f452b1d268868691e84d008 /testcases
parent    e74c9b347f2623eb1a3c477921a84da4c31b364f (diff)
Repo structure modification
- create functest subdirectory
- rename unit tests
- adapt path in exec and config files

JIRA: FUNCTEST-525
Change-Id: Ifd5c6edfb5bda1b09f82848e2269ad5fbeb84d0a
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
Diffstat (limited to 'testcases')
-rwxr-xr-x  testcases/Controllers/ODL/OpenDaylightTesting.py  224
-rw-r--r--  testcases/Controllers/ODL/__init__.py  0
-rw-r--r--  testcases/Controllers/ONOS/Sfc/README.md  21
-rwxr-xr-x  testcases/Controllers/ONOS/Sfc/Sfc.py  177
-rw-r--r--  testcases/Controllers/ONOS/Sfc/Sfc_fun.py  863
-rw-r--r--  testcases/Controllers/ONOS/Teston/Readme.txt  5
-rw-r--r--  testcases/Controllers/ONOS/Teston/__init__.py  0
-rw-r--r--  testcases/Controllers/ONOS/Teston/adapters/__init__.py  0
-rw-r--r--  testcases/Controllers/ONOS/Teston/adapters/client.py  92
-rw-r--r--  testcases/Controllers/ONOS/Teston/adapters/connection.py  200
-rw-r--r--  testcases/Controllers/ONOS/Teston/adapters/environment.py  286
-rw-r--r--  testcases/Controllers/ONOS/Teston/adapters/foundation.py  99
-rw-r--r--  testcases/Controllers/ONOS/Teston/dependencies/onos  29
-rw-r--r--  testcases/Controllers/ONOS/Teston/log/gitignore  0
-rwxr-xr-x  testcases/Controllers/ONOS/Teston/onosfunctest.py  270
-rw-r--r--  testcases/Controllers/__init__.py  0
-rwxr-xr-x  testcases/OpenStack/examples/create_instance_and_ip.py  130
-rwxr-xr-x  testcases/OpenStack/healthcheck/healthcheck.sh  261
-rw-r--r--  testcases/OpenStack/rally/blacklist.txt  18
-rw-r--r--  testcases/OpenStack/rally/macro/macro.yaml  97
-rwxr-xr-x  testcases/OpenStack/rally/run_rally-cert.py  625
-rw-r--r--  testcases/OpenStack/rally/scenario/full/opnfv-cinder.yaml  266
-rw-r--r--  testcases/OpenStack/rally/scenario/full/opnfv-heat.yaml  140
-rw-r--r--  testcases/OpenStack/rally/scenario/full/opnfv-neutron.yaml  239
-rw-r--r--  testcases/OpenStack/rally/scenario/full/opnfv-nova.yaml  369
-rw-r--r--  testcases/OpenStack/rally/scenario/opnfv-authenticate.yaml  63
-rw-r--r--  testcases/OpenStack/rally/scenario/opnfv-glance.yaml  49
-rw-r--r--  testcases/OpenStack/rally/scenario/opnfv-keystone.yaml  92
-rw-r--r--  testcases/OpenStack/rally/scenario/opnfv-quotas.yaml  54
-rw-r--r--  testcases/OpenStack/rally/scenario/opnfv-requests.yaml  11
-rw-r--r--  testcases/OpenStack/rally/scenario/opnfv-vm.yaml  42
-rw-r--r--  testcases/OpenStack/rally/scenario/sanity/opnfv-cinder.yaml  84
-rw-r--r--  testcases/OpenStack/rally/scenario/sanity/opnfv-heat.yaml  42
-rw-r--r--  testcases/OpenStack/rally/scenario/sanity/opnfv-neutron.yaml  152
-rw-r--r--  testcases/OpenStack/rally/scenario/sanity/opnfv-nova.yaml  140
-rwxr-xr-x  testcases/OpenStack/rally/scenario/support/instance_dd_test.sh  13
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/autoscaling_policy.yaml.template  17
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/default.yaml.template  1
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/random_strings.yaml.template  13
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/resource_group.yaml.template  13
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/server_with_ports.yaml.template  64
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/server_with_volume.yaml.template  43
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/updated_autoscaling_policy_inplace.yaml.template  23
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/updated_random_strings_add.yaml.template  19
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/updated_random_strings_delete.yaml.template  11
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/updated_random_strings_replace.yaml.template  19
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/updated_resource_group_increase.yaml.template  16
-rw-r--r--  testcases/OpenStack/rally/scenario/templates/updated_resource_group_reduce.yaml.template  16
-rw-r--r--  testcases/OpenStack/rally/task.yaml  48
-rw-r--r--  testcases/OpenStack/tempest/custom_tests/blacklist.txt  96
-rw-r--r--  testcases/OpenStack/tempest/custom_tests/defcore_req.txt  122
-rwxr-xr-x  testcases/OpenStack/tempest/gen_tempest_conf.py  124
-rwxr-xr-x  testcases/OpenStack/tempest/run_tempest.py  471
-rwxr-xr-x  testcases/OpenStack/vPing/ping.sh  13
-rwxr-xr-x  testcases/OpenStack/vPing/vping.py  97
-rw-r--r--  testcases/OpenStack/vPing/vping_util.py  461
-rw-r--r--  testcases/__init__.py  0
-rwxr-xr-x  testcases/features/copper.py  84
-rwxr-xr-x  testcases/features/doctor.py  90
-rwxr-xr-x  testcases/features/domino.py  87
-rwxr-xr-x  testcases/features/multisite.py  21
-rwxr-xr-x  testcases/features/promise.py  255
-rw-r--r--  testcases/features/sfc/SSHUtils.py  120
-rwxr-xr-x  testcases/features/sfc/compute_presetup_CI.bash  27
-rwxr-xr-x  testcases/features/sfc/correct_classifier.bash  37
-rwxr-xr-x  testcases/features/sfc/delete.sh  15
-rw-r--r--  testcases/features/sfc/ovs_utils.py  117
-rwxr-xr-x  testcases/features/sfc/prepare_odl_sfc.bash  38
-rwxr-xr-x  testcases/features/sfc/prepare_odl_sfc.py  96
-rwxr-xr-x  testcases/features/sfc/server_presetup_CI.bash  13
-rwxr-xr-x  testcases/features/sfc/sfc.py  545
-rwxr-xr-x  testcases/features/sfc/sfc_change_classi.bash  7
-rwxr-xr-x  testcases/features/sfc/sfc_colorado1.py  596
-rwxr-xr-x  testcases/features/sfc/sfc_tacker.bash  31
-rwxr-xr-x  testcases/features/sfc/tacker_client_install.sh  43
-rw-r--r--  testcases/features/sfc/test-vnfd1.yaml  31
-rw-r--r--  testcases/features/sfc/test-vnfd2.yaml  31
-rw-r--r--  testcases/security_scan/config.ini  29
-rw-r--r--  testcases/security_scan/connect.py  244
-rw-r--r--  testcases/security_scan/examples/xccdf-rhel7-server-upstream.ini  29
-rw-r--r--  testcases/security_scan/examples/xccdf-standard.ini  29
-rw-r--r--  testcases/security_scan/scripts/createfiles.py  26
-rw-r--r--  testcases/security_scan/scripts/internet_check.py  25
-rwxr-xr-x  testcases/security_scan/security_scan.py  215
-rw-r--r--  testcases/vnf/vIMS/clearwater.py  66
-rwxr-xr-x  testcases/vnf/vIMS/create_venv.sh  44
-rw-r--r--  testcases/vnf/vIMS/orchestrator.py  234
-rw-r--r--  testcases/vnf/vIMS/requirements.pip  1
-rwxr-xr-x  testcases/vnf/vIMS/vIMS.py  536
-rwxr-xr-x  testcases/vnf/vRNC/parser.py  71
90 files changed, 0 insertions, 10673 deletions
diff --git a/testcases/Controllers/ODL/OpenDaylightTesting.py b/testcases/Controllers/ODL/OpenDaylightTesting.py
deleted file mode 100755
index 8c003abfd..000000000
--- a/testcases/Controllers/ODL/OpenDaylightTesting.py
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import argparse
-import errno
-import fileinput
-import os
-import re
-import sys
-import urlparse
-
-from robot.api import ExecutionResult, ResultVisitor
-from robot.errors import RobotError
-import robot.run
-from robot.utils.robottime import timestamp_to_secs
-
-from functest.core import TestCasesBase
-import functest.utils.functest_logger as ft_logger
-import functest.utils.openstack_utils as op_utils
-
-
-class ODLResultVisitor(ResultVisitor):
-
- def __init__(self):
- self._data = []
-
- def visit_test(self, test):
- output = {}
- output['name'] = test.name
- output['parent'] = test.parent.name
- output['status'] = test.status
- output['startime'] = test.starttime
- output['endtime'] = test.endtime
- output['critical'] = test.critical
- output['text'] = test.message
- output['elapsedtime'] = test.elapsedtime
- self._data.append(output)
-
- def get_data(self):
- return self._data
-
-
-class ODLTestCases(TestCasesBase.TestCasesBase):
-
- repos = "/home/opnfv/repos/"
- odl_test_repo = repos + "odl_test/"
- neutron_suite_dir = odl_test_repo + "csit/suites/openstack/neutron/"
- basic_suite_dir = odl_test_repo + "csit/suites/integration/basic/"
- res_dir = '/home/opnfv/functest/results/odl/'
- logger = ft_logger.Logger("opendaylight").getLogger()
-
- def __init__(self):
- self.case_name = "odl"
-
- @classmethod
- def set_robotframework_vars(cls, odlusername="admin", odlpassword="admin"):
- odl_variables_files = cls.odl_test_repo + 'csit/variables/Variables.py'
- try:
- for line in fileinput.input(odl_variables_files,
- inplace=True):
- print re.sub("AUTH = .*",
- ("AUTH = [u'" + odlusername + "', u'" +
- odlpassword + "']"),
- line.rstrip())
- return True
- except Exception as e:
- cls.logger.error("Cannot set ODL creds: %s" % str(e))
- return False
-
- def parse_results(self):
- result = ExecutionResult(self.res_dir + 'output.xml')
- visitor = ODLResultVisitor()
- result.visit(visitor)
- self.criteria = result.suite.status
- self.start_time = timestamp_to_secs(result.suite.starttime)
- self.stop_time = timestamp_to_secs(result.suite.endtime)
- self.details = {}
- self.details['description'] = result.suite.name
- self.details['tests'] = visitor.get_data()
-
- def main(self, **kwargs):
- dirs = [self.basic_suite_dir, self.neutron_suite_dir]
- try:
- odlusername = kwargs['odlusername']
- odlpassword = kwargs['odlpassword']
- variables = ['KEYSTONE:' + kwargs['keystoneip'],
- 'NEUTRON:' + kwargs['neutronip'],
- 'OSUSERNAME:"' + kwargs['osusername'] + '"',
- 'OSTENANTNAME:"' + kwargs['ostenantname'] + '"',
- 'OSPASSWORD:"' + kwargs['ospassword'] + '"',
- 'ODL_SYSTEM_IP:' + kwargs['odlip'],
- 'PORT:' + kwargs['odlwebport'],
- 'RESTCONFPORT:' + kwargs['odlrestconfport']]
- except KeyError as e:
- self.logger.error("Cannot run ODL testcases. Please check "
- "%s" % str(e))
- return self.EX_RUN_ERROR
- if self.set_robotframework_vars(odlusername, odlpassword):
- try:
- os.makedirs(self.res_dir)
- except OSError as e:
- if e.errno != errno.EEXIST:
- self.logger.exception(
- "Cannot create {}".format(self.res_dir))
- return self.EX_RUN_ERROR
- stdout_file = self.res_dir + 'stdout.txt'
- with open(stdout_file, 'w+') as stdout:
- robot.run(*dirs, variable=variables,
- output=self.res_dir + 'output.xml',
- log='NONE',
- report='NONE',
- stdout=stdout)
- stdout.seek(0, 0)
- self.logger.info("\n" + stdout.read())
- self.logger.info("ODL results were successfully generated")
- try:
- self.parse_results()
- self.logger.info("ODL results were successfully parsed")
- except RobotError as e:
- self.logger.error("Run tests before publishing: %s" %
- e.message)
- return self.EX_RUN_ERROR
- try:
- os.remove(stdout_file)
- except OSError:
- self.logger.warning("Cannot remove {}".format(stdout_file))
- return self.EX_OK
- else:
- return self.EX_RUN_ERROR
-
- def run(self):
- try:
- kclient = op_utils.get_keystone_client()
- keystone_url = kclient.service_catalog.url_for(
- service_type='identity', endpoint_type='publicURL')
- neutron_url = kclient.service_catalog.url_for(
- service_type='network', endpoint_type='publicURL')
- kwargs = {'keystoneip': urlparse.urlparse(keystone_url).hostname}
- kwargs['neutronip'] = urlparse.urlparse(neutron_url).hostname
- kwargs['odlip'] = kwargs['neutronip']
- kwargs['odlwebport'] = '8080'
- kwargs['odlrestconfport'] = '8181'
- kwargs['odlusername'] = 'admin'
- kwargs['odlpassword'] = 'admin'
- installer_type = None
- if 'INSTALLER_TYPE' in os.environ:
- installer_type = os.environ['INSTALLER_TYPE']
- kwargs['osusername'] = os.environ['OS_USERNAME']
- kwargs['ostenantname'] = os.environ['OS_TENANT_NAME']
- kwargs['ospassword'] = os.environ['OS_PASSWORD']
- if installer_type == 'fuel':
- kwargs['odlwebport'] = '8282'
- elif installer_type == 'apex':
- kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
- kwargs['odlwebport'] = '8181'
- elif installer_type == 'joid':
- kwargs['odlip'] = os.environ['SDN_CONTROLLER']
- elif installer_type == 'compass':
- kwargs['odlwebport'] = '8181'
- else:
- kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
- except KeyError as e:
- self.logger.error("Cannot run ODL testcases. "
- "Please check env var: "
- "%s" % str(e))
- return self.EX_RUN_ERROR
- except Exception:
- self.logger.exception("Cannot run ODL testcases.")
- return self.EX_RUN_ERROR
-
- return self.main(**kwargs)
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('-k', '--keystoneip',
- help='Keystone IP',
- default='127.0.0.1')
- parser.add_argument('-n', '--neutronip',
- help='Neutron IP',
- default='127.0.0.1')
- parser.add_argument('-a', '--osusername',
- help='Username for OpenStack',
- default='admin')
- parser.add_argument('-b', '--ostenantname',
- help='Tenantname for OpenStack',
- default='admin')
- parser.add_argument('-c', '--ospassword',
- help='Password for OpenStack',
- default='admin')
- parser.add_argument('-o', '--odlip',
- help='OpenDaylight IP',
- default='127.0.0.1')
- parser.add_argument('-w', '--odlwebport',
- help='OpenDaylight Web Portal Port',
- default='8080')
- parser.add_argument('-r', '--odlrestconfport',
- help='OpenDaylight RESTConf Port',
- default='8181')
- parser.add_argument('-d', '--odlusername',
- help='Username for ODL',
- default='admin')
- parser.add_argument('-e', '--odlpassword',
- help='Password for ODL',
- default='admin')
- parser.add_argument('-p', '--pushtodb',
- help='Push results to DB',
- action='store_true')
-
- args = vars(parser.parse_args())
- odl = ODLTestCases()
- try:
- result = odl.main(**args)
- if result != TestCasesBase.TestCasesBase.EX_OK:
- sys.exit(result)
- if args['pushtodb']:
- sys.exit(odl.push_to_db())
- except Exception:
- sys.exit(TestCasesBase.TestCasesBase.EX_RUN_ERROR)
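The runner above follows one pattern end to end: invoke Robot Framework programmatically, then post-process its output.xml. A minimal sketch of that pattern, with the suite path and variable value as placeholders rather than Functest defaults:

import robot
from robot.api import ExecutionResult, ResultVisitor


class StatusVisitor(ResultVisitor):
    # Collect (name, status) for every executed test, as ODLResultVisitor does.
    def __init__(self):
        self.tests = []

    def visit_test(self, test):
        self.tests.append((test.name, test.status))


# Run one suite directory with a variable override (both values are placeholders).
robot.run('csit/suites/integration/basic',
          variable=['ODL_SYSTEM_IP:127.0.0.1'],
          output='output.xml', log='NONE', report='NONE')

result = ExecutionResult('output.xml')
visitor = StatusVisitor()
result.visit(visitor)
print(visitor.tests)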
diff --git a/testcases/Controllers/ODL/__init__.py b/testcases/Controllers/ODL/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/testcases/Controllers/ODL/__init__.py
+++ /dev/null
diff --git a/testcases/Controllers/ONOS/Sfc/README.md b/testcases/Controllers/ONOS/Sfc/README.md
deleted file mode 100644
index ae63ee214..000000000
--- a/testcases/Controllers/ONOS/Sfc/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-SFC Script ReadMe File
-**********************
-
-Topology
----------
-
-Validated with the Fuel environment.
-
-
-Things to Remember:
---------------------
-
-1] This script tests the SFC functionality with the ONOS controller.
-2] The IP addresses of OpenStack and ONOS are obtained dynamically.
-3] Initially this SFC script targets ONOS; on request it can be adapted for other controllers.
-
-
-Contact Details:
------------------
-
-email-id : antonysilvester@gmail.com
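Point 2] presumably means resolving the service endpoints from the Keystone catalogue instead of hard-coding them; a minimal sketch of such a lookup, mirroring OpenDaylightTesting.run above (the endpoint types are the ones used there):

import urlparse  # Python 2, as in the rest of this tree

import functest.utils.openstack_utils as op_utils

# Resolve the Neutron endpoint host from the Keystone service catalogue.
kclient = op_utils.get_keystone_client()
neutron_url = kclient.service_catalog.url_for(service_type='network',
                                              endpoint_type='publicURL')
print(urlparse.urlparse(neutron_url).hostname)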
diff --git a/testcases/Controllers/ONOS/Sfc/Sfc.py b/testcases/Controllers/ONOS/Sfc/Sfc.py
deleted file mode 100755
index bea2828d2..000000000
--- a/testcases/Controllers/ONOS/Sfc/Sfc.py
+++ /dev/null
@@ -1,177 +0,0 @@
-"""Script to Test the SFC scenarios in ONOS."""
-# !/usr/bin/python
-#
-# Copyright (c) CREATED5 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# ###########################################################################
-# OPNFV SFC Script
-# **** Scripted by Antony Silvester - antony.silvester@huawei.com ******
-# ###########################################################################
-
-# Testcase 1 : Prerequisites configuration for SFC
-# Testcase 2 : Creation of 3 VNF Nodes and Attaching Ports
-# Testcase 3 : Configure SFC [Port Pair, Port Group, Flow Classifier]
-# Testcase 4 : Configure Port Chain and verify the flows are added.
-# Testcase 5 : Verify traffic with VNF node.
-# Testcase 6 : Remove the Port Chain and Verify the traffic.
-# Testcase 7 : Cleanup
-# ###########################################################################
-#
-
-import time
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
-from Sfc_fun import Sfc_fun
-
-logger = ft_logger.Logger("sfc").getLogger()
-Sfc_obj = Sfc_fun()
-
-OK = 200
-CREATED = 201
-ACCEPTED = 202
-NO_CONTENT = 204
-
-start_time = time.time()
-
-
-def PreConfig():
- logger.info("Testcase 1 : Prerequisites configuration for SFC")
- logger.info("1.1 Creation of Auth-Token")
- check(Sfc_obj.getToken, OK, "Creation of Token")
- logger.info("1.2 Creation of Network")
- check(Sfc_obj.createNetworks, CREATED, "Creation of network")
- logger.info("1.3 Creation of Subnetwork")
- check(Sfc_obj.createSubnets, CREATED, "Creation of Subnetwork")
-
-
-def CreateNodes():
- logger.info("Testcase 2 : Creation of 3 VNF Nodes and Attaching Ports")
- logger.info("2.1 Creation of Ports")
- check(Sfc_obj.createPorts, CREATED, "Creation of Port")
- logger.info("2.2 Creation of VM-Compute-Node")
- check(Sfc_obj.createVm, ACCEPTED, "Creation of VM")
- logger.info("2.3 Check VM Status")
- check(Sfc_obj.checkVmState, OK, "VM status check")
- logger.info("2.4 Router Creation")
- check(Sfc_obj.createRouter, CREATED, "Creation of Router")
- logger.info("2.5 Attachement of Interface to VM")
- check(Sfc_obj.attachInterface, OK, "Interface attached to VM")
- logger.info("2.6 Attachement of FLoating Ip to VM")
- check(Sfc_obj.addFloatingIp, ACCEPTED, "Floating Ip attached to VM")
-
-
-def ConfigSfc():
- logger.info(
- "Testcase 3 : Configure SFC [Portair,PortGroup,Flow classifer]")
- logger.info("3.1 Creation of Port Pair")
- check(Sfc_obj.createPortPair, CREATED, "Creation of Port Pair")
- logger.info("3.2 Getting the Port Pair ID")
- check(Sfc_obj.getPortPair, OK, "Getting Port Pair ID")
- logger.info("3.3 Creation of Port Pair Group")
- check(Sfc_obj.createPortGroup, CREATED, "Creation of Port Pair Group")
- logger.info("3.4 Getting Port Pair Group ID ")
- check(Sfc_obj.getPortGroup, OK, "Getting Port Pair Group ID")
- logger.info("3.5 Creation of Flow Classifier")
- check(Sfc_obj.createFlowClassifier, CREATED, "Creation of Flow Classifier")
- logger.info(
- "Testcase 4 : Configure Port Chain and verify flows are added")
- logger.info("4.1 Creation of Port Chain")
- check(Sfc_obj.createPortChain, CREATED, "Creation of Port Chain")
-
-
-def VerifySfcTraffic():
- status = "PASS"
- logger.info("Testcase 5 : Verify traffic with VNF node.")
- if (Sfc_obj.loginToVM() == "1"):
- logger.info("SFC function Working")
- else:
- logger.error("SFC function not working")
- status = "FAIL"
-
- logger.info("Testcase 6 : Remove the Port Chain and Verify the traffic")
- if (Sfc_obj.deletePortChain() == NO_CONTENT):
- if (Sfc_obj.loginToVM() == "0"):
- logger.info("SFC function is removed Successfully")
- else:
- logger.error("SFC function not Removed. Have some problem")
- status = "FAIL"
- if (Sfc_obj.deleteFlowClassifier() == NO_CONTENT):
- if (Sfc_obj.deletePortGroup() == NO_CONTENT):
- if (Sfc_obj.deletePortPair() == NO_CONTENT):
- logger.info(
- "SFC configuration is deleted successfully")
- else:
- logger.error("Port pair is deleted successfully")
- status = "FAIL"
- else:
- logger.error("Port Group is NOT deleted successfully")
- status = "FAIL"
- else:
- logger.error("Flow classifier is NOT deleted successfully")
- status = "FAIL"
- else:
- logger.error("PortChain configuration is NOT deleted successfully")
- status = "FAIL"
- if (status == "FAIL"):
- fail("Traffic for SFC is NOT verified successfully")
-
-
-def CleanUp():
- logger.info("Testcase 7 : Cleanup")
- if (Sfc_obj.cleanup() == NO_CONTENT):
- logger.info("CleanUp is successfull")
- else:
- logger.error("CleanUp is NOT successfull")
-
-
-def check(method, criteria, msg):
- if (method() == criteria):
- logger.info(msg + ' is successful')
- else:
- fail(msg + ' is not successful')
-
-
-def fail(fail_info):
- logger.error(fail_info)
- CleanUp()
- PushDB("FAIL", fail_info)
- exit(-1)
-
-
-def PushDB(status, info):
- logger.info("Summary :")
- try:
- logger.debug("Push ONOS SFC results into DB")
- stop_time = time.time()
-
- # ONOS SFC success criteria = all tests OK
- duration = round(stop_time - start_time, 1)
- logger.info("Result is " + status)
- functest_utils.push_results_to_db("functest",
- "onos_sfc",
- start_time,
- stop_time,
- status,
- details={'duration': duration,
- 'error': info})
- except:
- logger.error("Error pushing results into Database")
-
-
-def main():
- """Script to Test the SFC scenarios in ONOS."""
- PreConfig()
- CreateNodes()
- ConfigSfc()
- VerifySfcTraffic()
- CleanUp()
- PushDB("PASS", "")
-
-
-if __name__ == '__main__':
- main()
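Testcases 3 and 4 above encode the dependency order of the networking-sfc resources that Sfc_fun.py (the next file) creates over REST: port pairs, then a port pair group, then a flow classifier, then the port chain. A compressed sketch of that order, with the Neutron host, token and port UUIDs as placeholders:

import json
import requests

NEUTRON = 'http://neutron_ip:9696/v2.0'  # host placeholder
HEADERS = {'Accept': 'application/json', 'X-Auth-Token': 'TOKEN'}  # token placeholder


def post(path, body):
    # POST a JSON body and return the decoded response.
    resp = requests.post(NEUTRON + path, headers=HEADERS, data=json.dumps(body))
    resp.raise_for_status()
    return resp.json()


pp = post('/sfc/port_pairs', {'port_pair': {
    'name': 'PP1', 'ingress': 'INGRESS_PORT_UUID', 'egress': 'EGRESS_PORT_UUID'}})
ppg = post('/sfc/port_pair_groups', {'port_pair_group': {
    'name': 'PG0', 'port_pairs': [pp['port_pair']['id']]}})
fc = post('/sfc/flow_classifiers', {'flow_classifier': {
    'name': 'FC1', 'ethertype': 'IPv4',
    'source_ip_prefix': '20.20.20.0/24',
    'destination_ip_prefix': '20.20.20.0/24',
    'logical_source_port': 'SRC_PORT_UUID'}})
post('/sfc/port_chains', {'port_chain': {
    'name': 'PC1', 'port_pair_groups': [ppg['port_pair_group']['id']],
    'flow_classifiers': [fc['flow_classifier']['id']]}})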
diff --git a/testcases/Controllers/ONOS/Sfc/Sfc_fun.py b/testcases/Controllers/ONOS/Sfc/Sfc_fun.py
deleted file mode 100644
index 69e076d05..000000000
--- a/testcases/Controllers/ONOS/Sfc/Sfc_fun.py
+++ /dev/null
@@ -1,863 +0,0 @@
-import os
-import re
-import time
-import json
-import requests
-
-from multiprocessing import Process
-from multiprocessing import Queue
-from pexpect import pxssh
-
-import functest.utils.functest_logger as ft_logger
-
-OK = 200
-CREATED = 201
-ACCEPTED = 202
-NO_CONTENT = 204
-
-
-class Sfc_fun:
- """Defines all the def function of SFC."""
-
- def __init__(self):
- """Initialization of variables."""
- self.logger = ft_logger.Logger("sfc_fun").getLogger()
- self.osver = "v2.0"
- self.token_id = 0
- self.net_id = 0
- self.image_id = 0
- self.keystone_hostname = 'keystone_ip'
- self.neutron_hostname = 'neutron_ip'
- self.nova_hostname = 'nova_ip'
- self.glance_hostname = 'glance_ip'
- self.onos_hostname = 'onos_ip'
- # Network variables #######
- self.netname = "test_nw"
- self.admin_state_up = True
- self.tenant_id = 0
- self.subnetId = 0
- # #########################
- # SubNet variables#########
- self.ip_version = 4
- self.cidr = "20.20.20.0/24"
- self.subnetname = "test_nw_subnets"
- # ###############################
- # Port variable
- self.port = "port"
- self.port_num = []
- self.vm_id = 0
- self.port_ip = []
- self.count = 0
- self.i = 0
- self.numTerms = 3
- self.security_groups = []
- self.port_security_enabled = False
- # ###############################
- # VM creation variable
- self.container_format = "bare"
- self.disk_format = "qcow2"
- self.imagename = "TestSfcVm"
- self.createImage = "/home/root1/devstack/files/images/\
- firewall_block_image.img"
-
- self.vm_name = "vm"
- self.imageRef = "test"
- self.flavorRef = "1"
- self.max_count = "1"
- self.min_count = "1"
- self.org_nw_port = []
- self.image_id = 0
- self.routername = "router1"
- self.router_id = 0
- # #####################################
- # Port pair
- self.port_pair_ingress = 0
- self.port_pair_egress = 0
- self.port_pair_name = "PP"
- self.port_pair_id = []
- # ####################################
- # Port Group
- self.port_group_name = "PG"
- self.port_grp_id = []
- # ####################################
- # FlowClassifier
- self.source_ip_prefix = "20.20.20.0/24"
- self.destination_ip_prefix = "20.20.20.0/24"
- self.logical_source_port = 0
- self.fcname = "FC"
- self.ethertype = "IPv4"
- # #####################################
- self.flow_class_if = 0
- # #####################################
- # Port Chain variables
- self.pcname = 'PC'
- self.PC_id = 0
- # #####################################
- # Port Chain variables
- self.flowadd = ''
- # #####################################
- self.ip_pool = 0
- self.vm_public_ip = []
- self.vm_public_id = []
- self.net_id1 = 0
- self.vm = []
- self.address = 0
- self.value = 0
- self.pub_net_id = 0
-
- def getToken(self):
- """Get the keystone token value from Openstack ."""
- url = 'http://' + self.keystone_hostname + \
- ':5000/' + self.osver + '/tokens'
- data = '{"auth": {"tenantName": "admin", "passwordCredentials":\
- { "username": "admin", "password": "console"}}}'
- headers = {"Accept": "application/json"}
- response = requests.post(url, headers=headers, data=data)
- if (response.status_code == OK):
- json1_data = json.loads(response.content)
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.debug(json1_data)
- self.token_id = json1_data['access']['token']['id']
- self.tenant_id = json1_data['access']['token']['tenant']['id']
- return(response.status_code)
- else:
- return(response.status_code)
-
- def createNetworks(self):
- """Creation of networks."""
- Dicdata = {}
- if self.netname != '':
- Dicdata['name'] = self.netname
- if self.admin_state_up != '':
- Dicdata['admin_state_up'] = self.admin_state_up
- Dicdata = {'network': Dicdata}
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.neutron_hostname + \
- ':9696/' + self.osver + '/networks'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.post(url, headers=headers, data=data)
- if (response.status_code == CREATED):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
-
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.net_id = json1_data['network']['id']
- return(response.status_code)
- else:
- return(response.status_code)
-
- def createSubnets(self):
- """Creation of SubNets."""
- Dicdata = {}
- if self.net_id != 0:
- Dicdata['network_id'] = self.net_id
- if self.ip_version != '':
- Dicdata['ip_version'] = self.ip_version
- if self.cidr != '':
- Dicdata['cidr'] = self.cidr
- if self.subnetname != '':
- Dicdata['name'] = self.subnetname
-
- Dicdata = {'subnet': Dicdata}
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.neutron_hostname + \
- ':9696/' + self.osver + '/subnets'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.post(url, headers=headers, data=data)
-
- if (response.status_code == CREATED):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.subnetId = json1_data['subnet']['id']
- return(response.status_code)
- else:
- return(response.status_code)
-
- def createPorts(self):
- """Creation of Ports."""
- for x in range(self.i, self.numTerms):
- Dicdata = {}
- if self.net_id != '':
- Dicdata['network_id'] = self.net_id
- if self.port != '':
- Dicdata['name'] = "port" + str(x)
- if self.admin_state_up != '':
- Dicdata['admin_state_up'] = self.admin_state_up
- if self.security_groups != '':
- Dicdata['security_groups'] = self.security_groups
- # if self.port_security_enabled != '':
- # Dicdata['port_security_enabled'] = self.port_security_enabled
-
- Dicdata = {'port': Dicdata}
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.neutron_hostname + \
- ':9696/' + self.osver + '/ports'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.post(url, headers=headers, data=data)
-
- if (response.status_code == CREATED):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
-
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.port_num.append(json1_data['port']['id'])
- self.port_ip.append(json1_data['port']['fixed_ips'][0]
- ['ip_address'])
- else:
- return(response.status_code)
- return(response.status_code)
-
- def createVm(self):
- """Creation of Instance, using firewall image."""
- url = 'http://' + self.glance_hostname + \
- ':9292/v2/images?name=TestSfcVm'
- headers = {"Accept": "application/json", "Content-Type": "application/\
- octet-stream", "X-Auth-Token": self.token_id}
- response = requests.get(url, headers=headers)
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.info("FireWall Image is available")
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.image_id = json1_data['images'][0]['id']
- else:
- return(response.status_code)
-
- url = 'http://' + self.nova_hostname + \
- ':8774/v2.1/' + self.tenant_id + '/flavors?name=m1.tiny'
- headers = {"Accept": "application/json", "Content-Type":
- "application/json", "X-Auth-Token": self.token_id}
- response = requests.get(url, headers=headers)
-
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.info("Flavor is available")
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.flavorRef = json1_data['flavors'][0]['id']
- else:
- return(response.status_code)
-
- for y in range(0, 3):
- Dicdata = {}
- org_nw_port = []
- org_nw_port.append({'port': self.port_num[y]})
- if self.vm_name != '':
- Dicdata['name'] = "vm" + str(y)
- if self.imageRef != '':
- Dicdata['imageRef'] = self.image_id
- if self.flavorRef != '':
- Dicdata['flavorRef'] = self.flavorRef
- if self.max_count != '':
- Dicdata['max_count'] = self.max_count
- if self.min_count != '':
- Dicdata['min_count'] = self.min_count
- if self.org_nw_port != '':
- Dicdata['networks'] = org_nw_port
- Dicdata = {'server': Dicdata}
- data = json.dumps(Dicdata, indent=4)
-
- url = ('http://' + self.nova_hostname + ':8774/v2.1/' +
- self.tenant_id + '/servers')
- headers = {"Accept": "application/json", "Content-Type":
- "application/json", "X-Auth-Token": self.token_id}
- response = requests.post(url, headers=headers, data=data)
- if (response.status_code == ACCEPTED):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- info = "Creation of VM" + str(y) + " is successfull"
- self.logger.debug(info)
-
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.vm_id = json1_data['server']['id']
- self.vm.append(json1_data['server']['id'])
- else:
- return(response.status_code)
-
- return(response.status_code)
-
- def checkVmState(self):
- """Checking the Status of the Instance."""
- time.sleep(10)
- for y in range(0, 3):
- url = 'http://' + \
- self.nova_hostname + \
- ':8774/v2.1/servers/detail?name=vm' + str(y)
- headers = {"Accept": "application/json", "X-Auth-Token":
- self.token_id}
- response = requests.get(url, headers=headers)
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.vm_active = json1_data['servers'][0]['status']
- if (self.vm_active == "ACTIVE"):
- info = "VM" + str(y) + \
- " is Active : " + self.vm_active
- else:
- info = "VM" + str(y) + " is NOT Active : " + \
- self.vm_active
- self.logger.debug(info)
- else:
- return(response.status_code)
- return(response.status_code)
-
- def createPortPair(self):
- """Creation of Port Pair."""
- for p in range(1, 2):
- Dicdata = {}
- if self.port_pair_ingress != '':
- Dicdata['ingress'] = self.port_num[p]
- if self.port_pair_egress != '':
- egress = p
- Dicdata['egress'] = self.port_num[egress]
- if self.port_pair_name != '':
- Dicdata['name'] = "PP" + str(p)
-
- Dicdata = {'port_pair': Dicdata}
- data = json.dumps(Dicdata, indent=4)
-
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/port_pairs'
- headers = {"Accept": "application/json", "X-Auth-Token":
- self.token_id}
- response = requests.post(url, headers=headers, data=data)
- if (response.status_code == CREATED):
- info = "Creation of Port Pair PP" + str(p) + \
- " is successful"
- self.logger.debug(info)
- else:
- return(response.status_code)
-
- return(response.status_code)
-
- def getPortPair(self):
- """Query the Portpair id value."""
- for p in range(0, 1):
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/port_pairs?name=PP1'
- headers = {"Accept": "application/json", "X-Auth-Token":
- self.token_id}
- response = requests.get(url, headers=headers)
-
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.port_pair_id.append(json1_data['port_pairs'][0]['id'])
- else:
- return(response.status_code)
- return(response.status_code)
-
- def createPortGroup(self):
- """Creation of PortGroup."""
- for p in range(0, 1):
- Dicdata = {}
- port_pair_list = []
- port_pair_list.append(self.port_pair_id[p])
- if self.port_group_name != '':
- Dicdata['name'] = "PG" + str(p)
- if self.port_pair_id != '':
- Dicdata['port_pairs'] = port_pair_list
-
- Dicdata = {'port_pair_group': Dicdata}
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/port_pair_groups'
- headers = {"Accept": "application/json", "X-Auth-Token":
- self.token_id}
- response = requests.post(url, headers=headers, data=data)
- if (response.status_code == CREATED):
- info = "Creation of Port Group PG" + str(p) + \
- "is successful"
- self.logger.debug(info)
- else:
- return(response.status_code)
-
- return(response.status_code)
-
- def getPortGroup(self):
- """Query the PortGroup id."""
- for p in range(0, 1):
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/port_pair_groups?name=PG' + str(p)
- headers = {"Accept": "application/json", "X-Auth-Token":
- self.token_id}
- response = requests.get(url, headers=headers)
-
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- json1_data = json.loads(response.content)
- self.port_grp_id.append(json1_data['port_pair_groups']
- [0]['id'])
- else:
- return(response.status_code)
- return(response.status_code)
-
- def createFlowClassifier(self):
- """Creation of Flow Classifier."""
- Dicdata = {}
- if self.source_ip_prefix != '':
- Dicdata['source_ip_prefix'] = self.source_ip_prefix
- if self.destination_ip_prefix != '':
- Dicdata['destination_ip_prefix'] = self.destination_ip_prefix
- if self.logical_source_port != '':
- Dicdata['logical_source_port'] = self.port_num[0]
- if self.fcname != '':
- Dicdata['name'] = "FC1"
- if self.ethertype != '':
- Dicdata['ethertype'] = self.ethertype
-
- Dicdata = {'flow_classifier': Dicdata}
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/flow_classifiers'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.post(url, headers=headers, data=data)
- if (response.status_code == CREATED):
- json1_data = json.loads(response.content)
- self.flow_class_if = json1_data['flow_classifier']['id']
- self.logger.debug("Creation of Flow Classifier is successful")
- return(response.status_code)
- else:
- return(response.status_code)
-
- def createPortChain(self):
- """Creation of PortChain."""
- Dicdata = {}
- flow_class_list = []
- flow_class_list.append(self.flow_class_if)
- port_pair_groups_list = []
- port_pair_groups_list.append(self.port_grp_id[0])
-
- if flow_class_list != '':
- Dicdata['flow_classifiers'] = flow_class_list
- if self.pcname != '':
- Dicdata['name'] = "PC1"
- if port_pair_groups_list != '':
- Dicdata['port_pair_groups'] = port_pair_groups_list
-
- Dicdata = {'port_chain': Dicdata}
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/port_chains'
- headers = {"Accept": "application/json",
- "Content-Type": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.post(url, headers=headers, data=data)
- if (response.status_code == CREATED):
- self.logger.debug("Creation of PORT CHAIN is successful")
- json1_data = json.loads(response.content)
- self.PC_id = json1_data['port_chain']['id']
- return(response.status_code)
- else:
- return(response.status_code)
-
- def checkFlowAdded(self):
- """Check whether the Flows are downloaded successfully."""
- time.sleep(5)
- response = requests.get('http://' + self.onos_hostname +
- ':8181/onos/v1/flows',
- auth=("karaf", "karaf"))
- if (response.status_code == OK):
- self.logger.debug("Flow is successfully Queries")
- json1_data = json.loads(response.content)
- self.flowadd = json1_data['flows'][0]['state']
-
- if (self.flowadd == "ADDED"):
- self.logger.info("Flow is successfully added to OVS")
- return(response.status_code)
- else:
- return(404)
- else:
- return(response.status_code)
-####################################################################
-
- def createRouter(self):
- """Creation of Router."""
- Dicdata = {}
- if self.routername != '':
- Dicdata['name'] = "router1"
- if self.admin_state_up != '':
- Dicdata['admin_state_up'] = self.admin_state_up
-
- Dicdata = {'router': Dicdata}
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.neutron_hostname + ':9696/' + \
- self.osver + '/routers.json'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.post(url, headers=headers, data=data)
- if (response.status_code == CREATED):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.debug("Creation of Router is successfull")
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.router_id = json1_data['router']['id']
- return(response.status_code)
- else:
- return(response.status_code)
-
- def attachInterface(self):
- """Attachment of instance ports to the Router."""
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/networks?name=admin_floating_net'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.get(url, headers=headers)
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.net_name = json1_data['networks'][0]['name']
- if (self.net_name == "admin_floating_net"):
- self.pub_net_id = json1_data['networks'][0]['id']
- else:
- return(response.status_code)
- ############################################################
-
- self.logger.info("Attachment of Instance interface to Router")
- Dicdata = {}
- if self.subnetId != '':
- Dicdata['subnet_id'] = self.subnetId
-
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/routers/' + self.router_id + '/add_router_interface'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.put(url, headers=headers, data=data)
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.info("Interface attached successfull")
- else:
- return(response.status_code)
- ############################################################
- self.logger.info("Attachment of Gateway to Router")
-
- Dicdata1 = {}
- if self.pub_net_id != 0:
- Dicdata1['network_id'] = self.pub_net_id
-
- Dicdata1 = {'external_gateway_info': Dicdata1}
- Dicdata1 = {'router': Dicdata1}
- data = json.dumps(Dicdata1, indent=4)
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/routers/' + self.router_id
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.put(url, headers=headers, data=data)
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.info("Gateway Interface attached successfull")
- return(response.status_code)
- else:
- return(response.status_code)
-
- def addFloatingIp(self):
- """Attachment of Floating Ip to the Router."""
- for ip_num in range(0, 2):
- Dicdata = {}
- Dicdata['pool'] = "admin_floating_net"
-
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.nova_hostname + ':8774/v2.1/os-floating-ips'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.post(url, headers=headers, data=data)
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.info("Floating ip created successfully")
- json1_data = json.loads(response.content)
- self.logger.debug(json1_data)
- self.vm_public_ip.append(json1_data['floating_ip']['ip'])
- self.vm_public_id.append(json1_data['floating_ip']['id'])
- else:
- self.logger.error("Floating ip NOT created successfully")
-
- Dicdata1 = {}
- if self.address != '':
- Dicdata1['address'] = self.vm_public_ip[ip_num]
-
- Dicdata1 = {'addFloatingIp': Dicdata1}
- data = json.dumps(Dicdata1, indent=4)
- url = 'http://' + self.nova_hostname + ':8774/v2.1/servers/' + \
- self.vm[ip_num] + '/action'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.post(url, headers=headers, data=data)
- if(response.status_code == ACCEPTED):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.info("Public Ip successfully added to VM")
- else:
- return(response.status_code)
- return(response.status_code)
-
- def loginToVM(self):
- """Login to the VM to check NSH packets are received."""
- queue1 = "0"
-
- def vm0():
-
- s = pxssh.pxssh()
- hostname = self.vm_public_ip[0]
- username = "cirros"
- password = "cubswin:)"
- s.login(hostname, username, password)
- s.sendline("ping -c 5 " + str(self.port_ip[2]))
- s.prompt() # match the prompt
-
- ping_re = re.search("transmitted.*received", s.before).group()
- x = re.split('\s+', ping_re)
- if (x[1] >= "1"):
- self.logger.info("Ping is Successfull")
- else:
- self.logger.info("Ping is NOT Successfull")
-
- def vm1(queue1):
- s = pxssh.pxssh()
- hostname = self.vm_public_ip[1]
- username = "cirros"
- password = "cubswin:)"
- s.login(hostname, username, password)
- s.sendline('sudo ./firewall')
- s.prompt()
- output_pack = s.before
-
- if(output_pack.find("nshc") != -1):
- self.logger.info("The packet has reached VM2 Instance")
- queue1.put("1")
- else:
- self.logger.info("Packet not received in Instance")
- queue1.put("0")
-
- def ping(ip, timeout=300):
- while True:
- time.sleep(1)
- self.logger.debug("Pinging %s. Waiting for response..." % ip)
- response = os.system("ping -c 1 " + ip + " >/dev/null 2>&1")
- if response == 0:
- self.logger.info("Ping " + ip + " detected!")
- return 0
-
- elif timeout == 0:
- self.logger.info("Ping " + ip + " timeout reached.")
- return 1
- timeout -= 1
-
- result0 = ping(self.vm_public_ip[0])
- result1 = ping(self.vm_public_ip[1])
- if result0 == 0 and result1 == 0:
- time.sleep(300)
- queue1 = Queue()
- p1 = Process(target=vm1, args=(queue1, ))
- p1.start()
- p2 = Process(target=vm0)
- p2.start()
- p1.join(10)
- return (queue1.get())
- else:
- self.logger.error("Thread didnt run")
-
- """##################################################################"""
- """ ######################## Stats Functions ################# #####"""
-
- def portChainDeviceMap(self):
- """Check the PC Device Stats in the ONOS."""
- response = requests.get('http://' + self.onos_hostname +
- ':8181/onos/vtn/portChainDeviceMap/' +
- self.PC_id, auth=("karaf", "karaf"))
- if (response.status_code == OK):
- self.logger.info("PortChainDeviceMap is successfully Queries")
- return(response.status_code)
- else:
- return(response.status_code)
-
- def portChainSfMap(self):
- """Check the PC SF Map Stats in the ONOS."""
- response = requests.get('http://' + self.onos_hostname +
- ':8181/onos/vtn/portChainSfMap/' +
- self.PC_id, auth=("karaf", "karaf"))
- if (response.status_code == OK):
- self.logger.info("portChainSfMap is successfully Queries")
- return(response.status_code)
- else:
- return(response.status_code)
-
- """###################################################################"""
-
- def deletePortChain(self):
- """Deletion of PortChain."""
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/port_chains/' + self.PC_id
- headers = {"Accept": "application/json", "Content-Type":
- "application/json", "X-Auth-Token": self.token_id}
- response = requests.delete(url, headers=headers)
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- return(response.status_code)
- else:
- return(response.status_code)
-
- def deleteFlowClassifier(self):
- """Deletion of Flow Classifier."""
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/flow_classifiers/' + self.flow_class_if
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.delete(url, headers=headers)
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- return(response.status_code)
- else:
- return(response.status_code)
-
- def deletePortGroup(self):
- """Deletion of PortGroup."""
- for p in range(0, 1):
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/port_pair_groups/' + self.port_grp_id[p]
- headers = {"Accept": "application/json", "X-Auth-Token":
- self.token_id}
- response = requests.delete(url, headers=headers)
- if (response.status_code == NO_CONTENT):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- else:
- return(response.status_code)
- return(response.status_code)
-
- def deletePortPair(self):
- """Deletion of Portpair."""
- for p in range(1, 2):
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/sfc/port_pairs/' + self.port_pair_id[0]
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.delete(url, headers=headers)
- if (response.status_code == NO_CONTENT):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- else:
- return(response.status_code)
- return(response.status_code)
-
- def cleanup(self):
- """Cleanup."""
- self.logger.info("Deleting VMs")
- for y in range(0, 3):
- url = 'http://' + self.nova_hostname + \
- ':8774/v2.1/servers/' + self.vm[y]
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.delete(url, headers=headers)
- if (response.status_code == NO_CONTENT):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.debug("VM" + str(y) + " is Deleted : ")
- time.sleep(10)
- else:
- return(response.status_code)
- self.logger.info("Deleting Ports")
- for x in range(self.i, self.numTerms):
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/ports/' + self.port_num[x]
- headers = {"Accept": "application/json", "X-Auth-Token":
- self.token_id}
- response = requests.delete(url, headers=headers)
-
- if (response.status_code == NO_CONTENT):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- self.logger.debug("Port" + str(x) + " Deleted")
- else:
- return(response.status_code)
- self.logger.info("Deleting Router")
-
- Dicdata = {}
- Dicdata['external_gateway_info'] = {}
- Dicdata = {'router': Dicdata}
- data = json.dumps(Dicdata, indent=4)
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/routers/' + self.router_id
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.put(url, headers=headers, data=data)
- if (response.status_code == OK):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- Dicdata1 = {}
- if self.subnetId != '':
- Dicdata1['subnet_id'] = self.subnetId
- data = json.dumps(Dicdata1, indent=4)
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/routers/' + self.router_id + \
- '/remove_router_interface.json'
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.put(url, headers=headers, data=data)
- if (response.status_code == OK):
- url = ('http://' + self.neutron_hostname + ':9696/' +
- self.osver + '/routers/' + self.router_id)
- headers = {"Accept": "application/json", "X-Auth-Token":
- self.token_id}
- response = requests.delete(url, headers=headers)
- if (response.status_code == NO_CONTENT):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- else:
- return(response.status_code)
- else:
- return(response.status_code)
- else:
- return(response.status_code)
-
- self.logger.info("Deleting Network")
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/networks/' + self.net_id
- headers = {"Accept": "application/json",
- "X-Auth-Token": self.token_id}
- response = requests.delete(url, headers=headers)
- if (response.status_code == NO_CONTENT):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- else:
- return(response.status_code)
-
- self.logger.info("Deleting Floating ip")
- for ip_num in range(0, 2):
- url = 'http://' + self.neutron_hostname + ':9696/' + self.osver + \
- '/floatingips/' + self.vm_public_id[ip_num]
- headers = {"Accept": "application/json", "X-Auth-Token":
- self.token_id}
- response = requests.delete(url, headers=headers)
- if (response.status_code == NO_CONTENT):
- self.logger.debug(response.status_code)
- self.logger.debug(response.content)
- else:
- return(response.status_code)
- return(response.status_code)
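The traffic check in loginToVM above drives the cirros guests with pexpect's pxssh; stripped to its essentials the interaction looks like this (the floating IP and ping target are placeholders, the credentials are the ones hard-coded above):

from pexpect import pxssh

session = pxssh.pxssh()
session.login('192.0.2.10', 'cirros', 'cubswin:)')  # floating IP placeholder
session.sendline('ping -c 5 20.20.20.5')            # fixed-IP target placeholder
session.prompt()                                    # wait for the shell prompt
print(session.before)                               # output printed before the prompt
session.logout()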
diff --git a/testcases/Controllers/ONOS/Teston/Readme.txt b/testcases/Controllers/ONOS/Teston/Readme.txt
deleted file mode 100644
index 7393f59a1..000000000
--- a/testcases/Controllers/ONOS/Teston/Readme.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-1. This is a basic ONOS test run; we will keep improving it.
-2. This test includes two suites:
-(1) Northbound tests (network/subnet/port create/update/delete)
-(2) OVSDB tests (default configuration, OpenFlow connection, VMs coming online)
-3. Later we will build a framework for these tests.
\ No newline at end of file
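As a quick manual counterpart to the OVSDB/OpenFlow suite, the ONOS northbound REST API can be queried directly, as Sfc_fun.checkFlowAdded does elsewhere in this tree (the host is a placeholder; karaf/karaf are the default credentials):

import requests

# Query installed flows over the ONOS REST API and check their state.
resp = requests.get('http://onos_ip:8181/onos/v1/flows', auth=('karaf', 'karaf'))
resp.raise_for_status()
states = [flow['state'] for flow in resp.json()['flows']]
print('all flows ADDED:', all(state == 'ADDED' for state in states))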
diff --git a/testcases/Controllers/ONOS/Teston/__init__.py b/testcases/Controllers/ONOS/Teston/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/testcases/Controllers/ONOS/Teston/__init__.py
+++ /dev/null
diff --git a/testcases/Controllers/ONOS/Teston/adapters/__init__.py b/testcases/Controllers/ONOS/Teston/adapters/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/testcases/Controllers/ONOS/Teston/adapters/__init__.py
+++ /dev/null
diff --git a/testcases/Controllers/ONOS/Teston/adapters/client.py b/testcases/Controllers/ONOS/Teston/adapters/client.py
deleted file mode 100644
index 6b3285e5e..000000000
--- a/testcases/Controllers/ONOS/Teston/adapters/client.py
+++ /dev/null
@@ -1,92 +0,0 @@
-"""
-Description:
- This file is used to run test cases
- lanqinglong@huawei.com
-
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-"""
-import json
-import pexpect
-import requests
-import time
-
-from environment import environment
-import functest.utils.functest_logger as ft_logger
-
-
-class client(environment):
-
- logger = ft_logger.Logger("client").getLogger()
-
- def __init__(self):
- environment.__init__(self)
- self.loginfo = environment()
- self.testcase = ''
-
- def RunScript(self, handle, testname, timeout=300):
- """
- Run ONOS Test Script
- Parameters:
- testname: ONOS Testcase Name
- masterusername: The server username of running ONOS
- masterpassword: The server password of running ONOS
- """
- self.testcase = testname
- self.ChangeTestCasePara(testname, self.masterusername,
- self.masterpassword)
- runhandle = handle
- runtest = (self.home + "/OnosSystemTest/TestON/bin/cli.py run " +
- testname)
- runhandle.sendline(runtest)
- circletime = 0
- lastshowscreeninfo = ''
- while True:
- Result = runhandle.expect(["PEXPECT]#", pexpect.EOF,
- pexpect.TIMEOUT])
- curshowscreeninfo = runhandle.before
- if(len(lastshowscreeninfo) != len(curshowscreeninfo)):
- self.loginfo.log(str(curshowscreeninfo)
- [len(lastshowscreeninfo)::])
- lastshowscreeninfo = curshowscreeninfo
- if Result == 0:
- self.logger.info("Done!")
- return
- time.sleep(1)
- circletime += 1
- if circletime > timeout:
- break
- self.loginfo.log("Timeout when running the test, please check!")
-
- def onosstart(self):
- # This is the compass run machine user & password; modify as needed
-
- self.logger.info("Test Begin.....")
- self.OnosConnectionSet()
- masterhandle = self.SSHlogin(self.localhost, self.masterusername,
- self.masterpassword)
- self.OnosEnvSetup(masterhandle)
- return masterhandle
-
- def onosclean(self, handle):
- self.SSHRelease(handle)
- self.loginfo.log('Release onos handle Successful')
-
- def push_results_to_db(self, payload, pushornot=1):
- if pushornot != 1:
- return 1
- url = self.Result_DB + "/results"
- params = {"project_name": "functest", "case_name": "ONOS-" +
- self.testcase, "pod_name": 'huawei-build-2',
- "details": payload}
-
- headers = {'Content-Type': 'application/json'}
- try:
- r = requests.post(url, data=json.dumps(params), headers=headers)
- self.loginfo.log(r)
- except:
- self.loginfo.log('Error pushing results into Database')
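push_results_to_db above amounts to a single JSON POST against the results API; a minimal standalone equivalent (the URL stands in for self.Result_DB and the case name is illustrative):

import json
import requests

# Same payload shape as client.push_results_to_db above.
params = {'project_name': 'functest',
          'case_name': 'ONOS-testcase',
          'pod_name': 'huawei-build-2',
          'details': {'status': 'PASS'}}
requests.post('http://results_db_host/results',
              data=json.dumps(params),
              headers={'Content-Type': 'application/json'})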
diff --git a/testcases/Controllers/ONOS/Teston/adapters/connection.py b/testcases/Controllers/ONOS/Teston/adapters/connection.py
deleted file mode 100644
index b2a2e3d88..000000000
--- a/testcases/Controllers/ONOS/Teston/adapters/connection.py
+++ /dev/null
@@ -1,200 +0,0 @@
-"""
-Description:
- This file is used to make connections
- Include ssh & exchange public-key to each other so that
- it can run without password
-
- lanqinglong@huawei.com
-
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-"""
-import os
-import pexpect
-import re
-
-from foundation import foundation
-import functest.utils.functest_logger as ft_logger
-
-
-class connection(foundation):
-
- logger = ft_logger.Logger("connection").getLogger()
-
- def __init__(self):
- foundation.__init__(self)
- self.loginfo = foundation()
-
- def AddKnownHost(self, handle, ipaddr, username, password):
- """
- Add a user to the known hosts, so that ONOS can log in with 'onos $ipaddr'.
- parameters:
- ipaddr: ip address
- username: login user name
- password: login password
- """
- self.logger.info("Now Adding an user to known hosts " + ipaddr)
- login = handle
- login.sendline("ssh -l %s -p 8101 %s" % (username, ipaddr))
- index = 0
- while index != 2:
- index = login.expect(['assword:', 'yes/no', pexpect.EOF,
- pexpect.TIMEOUT])
- if index == 0:
- login.sendline(password)
- login.sendline("logout")
- index = login.expect(["closed", pexpect.EOF])
- if index == 0:
- self.loginfo.log("Add SSH Known Host Success!")
- break
- else:
- self.loginfo.log("Add SSH Known Host Failed! "
- "Please Check!")
- break
- login.prompt()
-
- if index == 1:
- login.sendline('yes')
-
- def GetEnvValue(self, handle, envname):
- """
- os.getenv only returns current user value
- GetEnvValue returns an environment value of
- current handle
- eg: GetEnvValue(handle,'HOME')
- """
- envhandle = handle
- envhandle.sendline('echo $' + envname)
- envhandle.prompt()
- reg = envname + '\r\n(.*)\r'
- envaluereg = re.compile(reg)
- envalue = envaluereg.search(envhandle.before)
- if envalue:
- return envalue.groups()[0]
- else:
- return None
-
- def Gensshkey(self, handle):
- """
- Generate SSH keys, for servers that have none.
- """
- self.logger.info("Now Generating SSH keys...")
- # Here file name may be id_rsa or id_ecdsa or others
- # So here will have a judgement
- keysub = handle
- filepath = self.GetEnvValue(keysub, 'HOME') + '/.ssh'
- filelist = os.listdir(filepath)
- for item in filelist:
- if 'id' in item:
- self.loginfo.log("SSH keys are exsit in ssh directory.")
- return True
- keysub.sendline("ssh-keygen -t rsa")
- Result = 0
- while Result != 2:
- Result = keysub.expect(["Overwrite", "Enter", pexpect.EOF,
- 'PEXPECT]#', pexpect.TIMEOUT])
- if Result == 0:
- keysub.sendline("y")
- if Result == 1 or Result == 2:
- keysub.sendline("\n")
- if Result == 3:
- self.loginfo.log("Generate SSH key success.")
- keysub.prompt()
- break
- if Result == 4:
- self.loginfo.log("Generate SSH key failed.")
- keysub.prompt()
- break
-
- def GetRootAuth(self, password):
- """
- Get root user
- parameters:
- password: root login password
- """
- self.logger.info("Now changing to user root")
- login = pexpect.spawn("su - root")
- index = 0
- while index != 2:
- index = login.expect(['assword:', "failure",
- pexpect.EOF, pexpect.TIMEOUT])
- if index == 0:
- login.sendline(password)
- if index == 1:
- self.loginfo.log("Change user to root failed.")
-
- login.interact()
-
- def ReleaseRootAuth(self):
- """
- Exit root user.
- """
- self.logger.info("Now Release user root")
- login = pexpect.spawn("exit")
- index = login.expect(['logout', pexpect.EOF, pexpect.TIMEOUT])
- if index == 0:
- self.loginfo.log("Release root user success.")
- if index == 1:
- self.loginfo.log("Release root user failed.")
-
- login.interact()
-
- def AddEnvIntoBashrc(self, envalue):
- """
- Add Env var into /etc/profile.
- parameters:
- envalue: environment value to add
- """
- self.logger.info("Now Adding bash environment")
- fileopen = open("/etc/profile", 'r')
-        result = -1
-        findContext = 1
- while findContext:
- findContext = fileopen.readline()
- result = findContext.find(envalue)
- if result != -1:
- break
-        fileopen.close()
- if result == -1:
- envAdd = open("/etc/profile", 'a+')
- envAdd.writelines("\n" + envalue)
- envAdd.close()
- self.loginfo.log("Add env to bashrc success!")
-
- def OnosRootPathChange(self, onospath):
- """
-        Change the ONOS root path in the bash_profile file.
- onospath: path of onos root
- """
- self.logger.info("Now Changing ONOS Root Path")
- filepath = onospath + 'onos/tools/dev/bash_profile'
- line = open(filepath, 'r').readlines()
- lenall = len(line) - 1
- for i in range(lenall):
- if "export ONOS_ROOT" in line[i]:
- line[i] = 'export ONOS_ROOT=' + onospath + 'onos\n'
- NewFile = open(filepath, 'w')
- NewFile.writelines(line)
-        NewFile.close()
- self.logger.info("Done!")
-
- def OnosConnectionSet(self):
- """
-        Integrated ONOS connection setup.
- """
- if self.masterusername == 'root':
- filepath = '/root/'
- else:
- filepath = '/home/' + self.masterusername + '/'
- filepath = os.path.join(filepath, "onos/tools/dev/bash_profile")
- self.AddEnvIntoBashrc("source " + filepath + "\n")
- self.AddEnvIntoBashrc("export OCT=" + self.OCT)
- self.AddEnvIntoBashrc("export OC1=" + self.OC1)
- self.AddEnvIntoBashrc("export OC2=" + self.OC2)
- self.AddEnvIntoBashrc("export OC3=" + self.OC3)
- self.AddEnvIntoBashrc("export OCN=" + self.OCN)
- self.AddEnvIntoBashrc("export OCN2=" + self.OCN2)
- self.AddEnvIntoBashrc("export localhost=" + self.localhost)
diff --git a/testcases/Controllers/ONOS/Teston/adapters/environment.py b/testcases/Controllers/ONOS/Teston/adapters/environment.py
deleted file mode 100644
index f2755b669..000000000
--- a/testcases/Controllers/ONOS/Teston/adapters/environment.py
+++ /dev/null
@@ -1,286 +0,0 @@
-"""
-Description:
-    This file is used to set up the running environment:
-    download code, set environment variables,
-    set the ONOS running config,
-    set user name/password,
-    run onos-push-keys and so on
- lanqinglong@huawei.com
-
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-"""
-
-import pexpect
-import pxssh
-import re
-import os
-import sys
-import time
-
-from connection import connection
-import functest.utils.functest_logger as ft_logger
-
-
-class environment(connection):
-
- logger = ft_logger.Logger("environment").getLogger()
-
- def __init__(self):
- connection.__init__(self)
- self.loginfo = connection()
- self.masterhandle = ''
- self.home = ''
-
- def DownLoadCode(self, handle, codeurl):
- """
- Download Code use 'git clone'
- parameters:
- handle: current working handle
- codeurl: clone code url
- """
- self.logger.info("Now loading test codes! Please wait in patient...")
- originalfolder = sys.path[0]
- self.logger.info(originalfolder)
- gitclone = handle
- gitclone.sendline("git clone " + codeurl)
- index = 0
- # increment = 0
-        while index not in (1, 4):
- index = gitclone.expect(['already exists',
- 'esolving deltas: 100%',
- 'eceiving objects',
- 'Already up-to-date',
- 'npacking objects: 100%', pexpect.EOF])
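-            # 0 = repo dir already exists, 1/4 = clone finished,
-            # 2 = progress output, 3 = already up to date, 5 = EOF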
-
- filefolder = self.home + '/' + codeurl.split('/')[-1].split('.')[0]
- if index == 0:
- os.chdir(filefolder)
- os.system('git pull')
- os.chdir(originalfolder)
- self.loginfo.log('Download code success!')
- break
- elif index == 1 or index == 4:
- self.loginfo.log('Download code success!')
- gitclone.sendline("mkdir onos")
- gitclone.prompt()
- gitclone.sendline("cp -rf " + filefolder + "/tools onos/")
- gitclone.prompt()
- break
- elif index == 2:
- os.write(1, gitclone.before)
- sys.stdout.flush()
- else:
- self.loginfo.log('Download code failed!')
- self.loginfo.log('Information before' + gitclone.before)
- break
- gitclone.prompt()
-
- def InstallDefaultSoftware(self, handle):
- """
- Install default software
- parameters:
- handle(input): current working handle
- """
- self.logger.info("Now Cleaning test environment")
- handle.sendline("sudo apt-get install -y mininet")
- handle.prompt()
- handle.sendline("sudo pip install configobj")
- handle.prompt()
- handle.sendline("sudo apt-get install -y sshpass")
- handle.prompt()
- handle.sendline("OnosSystemTest/TestON/bin/cleanup.sh")
- handle.prompt()
- time.sleep(5)
- self.loginfo.log('Clean environment success!')
-
- def OnosPushKeys(self, handle, cmd, password):
- """
-        Use onos-push-keys so the device can be reached
-        over ssh without a password
-        parameters:
-        handle(input): working handle
-        cmd(input): onos-push-keys xxx(xxx is device)
-        password(input): login password
- """
- self.logger.info("Now Pushing Onos Keys:" + cmd)
- Pushkeys = handle
- Pushkeys.sendline(cmd)
- Result = 0
- while Result != 2:
- Result = Pushkeys.expect(["(yes/no)", "assword:", "PEXPECT]#",
- pexpect.EOF, pexpect.TIMEOUT])
- if(Result == 0):
- Pushkeys.sendline("yes")
- if(Result == 1):
- Pushkeys.sendline(password)
- if(Result == 2):
- self.loginfo.log("ONOS Push keys Success!")
- break
- if(Result == 3):
- self.loginfo.log("ONOS Push keys Error!")
- break
- time.sleep(2)
- Pushkeys.prompt()
- self.logger.info("Done!")
-
- def SetOnosEnvVar(self, handle, masterpass, agentpass):
- """
-        Set up onos-push-keys to all devices (3+2)
-        parameters:
-        handle(input): current working handle
-        masterpass: password of the server running the scripts
-        agentpass: onos cluster & compute node password
- """
- self.logger.info("Now Setting test environment")
- for host in self.hosts:
- self.logger.info("try to connect " + str(host))
- result = self.CheckSshNoPasswd(host)
- if not result:
-                self.logger.info(
-                    "ssh login failed, try to copy master public key "
-                    "to agent " + str(host))
- self.CopyPublicKey(host)
- self.OnosPushKeys(handle, "onos-push-keys " + self.OCT, masterpass)
- self.OnosPushKeys(handle, "onos-push-keys " + self.OC1, agentpass)
- self.OnosPushKeys(handle, "onos-push-keys " + self.OC2, agentpass)
- self.OnosPushKeys(handle, "onos-push-keys " + self.OC3, agentpass)
- self.OnosPushKeys(handle, "onos-push-keys " + self.OCN, agentpass)
- self.OnosPushKeys(handle, "onos-push-keys " + self.OCN2, agentpass)
-
- def CheckSshNoPasswd(self, host):
- """
-        Check that the master can connect to the agent without a password
- """
- login = pexpect.spawn("ssh " + str(host))
- index = 4
- while index == 4:
- index = login.expect(['(yes/no)', '>|#|\$',
- pexpect.EOF, pexpect.TIMEOUT])
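-            # 0 = host-key confirmation, 1 = shell prompt (login worked),
-            # 2/3 = EOF/timeout (treated as failure)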
- if index == 0:
- login.sendline("yes")
- index = 4
- if index == 1:
- self.loginfo.log("ssh connect to " + str(host) +
- " success,no need to copy ssh public key")
- return True
- login.interact()
- return False
-
- def ChangeOnosName(self, user, password):
- """
-        Change the ONOS name and password in the envDefaults file,
-        because some commands depend on them
- parameters:
- user: onos&compute node user
- password: onos&compute node password
- """
- self.logger.info("Now Changing ONOS name&password")
- filepath = self.home + '/onos/tools/build/envDefaults'
- line = open(filepath, 'r').readlines()
- lenall = len(line) - 1
- for i in range(lenall):
- if "ONOS_USER=" in line[i]:
- line[i] = line[i].replace("sdn", user)
- if "ONOS_GROUP" in line[i]:
- line[i] = line[i].replace("sdn", user)
- if "ONOS_PWD" in line[i]:
- line[i] = line[i].replace("rocks", password)
- NewFile = open(filepath, 'w')
- NewFile.writelines(line)
-        NewFile.close()
- self.logger.info("Done!")
-
- def ChangeTestCasePara(self, testcase, user, password):
- """
-        When running a test script, some values need to be changed
-        in every test folder's *.param & *.topo files
- user: onos&compute node user
- password: onos&compute node password
- """
- self.logger.info("Now Changing " + testcase + " name&password")
- if self.masterusername == 'root':
- filepath = '/root/'
- else:
- filepath = '/home/' + self.masterusername + '/'
- filepath = (filepath + "OnosSystemTest/TestON/tests/" +
- testcase + "/" + testcase + ".topo")
- line = open(filepath, 'r').readlines()
- lenall = len(line) - 1
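-        # In the .topo file, the <user> tag is on the line after the host
-        # entry and the <password> tag two lines after; rewrite both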
- for i in range(lenall - 2):
- if("localhost" in line[i]) or ("OCT" in line[i]):
- line[i + 1] = re.sub(">\w+", ">" + user, line[i + 1])
- line[i + 2] = re.sub(">\w+", ">" + password, line[i + 2])
- if ("OC1" in line[i] or "OC2" in line[i] or "OC3" in line[i] or
- "OCN" in line[i] or "OCN2" in line[i]):
- line[i + 1] = re.sub(">\w+", ">root", line[i + 1])
- line[i + 2] = re.sub(">\w+", ">root", line[i + 2])
- NewFile = open(filepath, 'w')
- NewFile.writelines(line)
-        NewFile.close()
-
- def SSHlogin(self, ipaddr, username, password):
- """
-        SSH login provides a connection to the destination.
- parameters:
- ipaddr: ip address
- username: login user name
- password: login password
- return: handle
- """
- login = pxssh.pxssh()
- login.login(ipaddr, username, password, original_prompt='[$#>]')
- # send command ls -l
- login.sendline('ls -l')
- # match prompt
- login.prompt()
- self.logger.info("SSH login " + ipaddr + " success!")
- return login
-
- def SSHRelease(self, handle):
- # Release ssh
- handle.logout()
-
- def CopyOnostoTestbin(self):
- sourcefile = self.cipath + '/dependencies/onos'
- destifile = self.home + '/onos/tools/test/bin/'
- os.system('pwd')
- runcommand = 'cp ' + sourcefile + ' ' + destifile
- os.system(runcommand)
-
- def CopyPublicKey(self, host):
- output = os.popen('cat /root/.ssh/id_rsa.pub')
- publickey = output.read().strip('\n')
- tmphandle = self.SSHlogin(self.installer_master,
- self.installer_master_username,
- self.installer_master_password)
- tmphandle.sendline("ssh " + host + " -T \'echo " +
- str(publickey) + ">>/root/.ssh/authorized_keys\'")
- tmphandle.prompt()
- self.SSHRelease(tmphandle)
- self.logger.info("Add OCT PublicKey to " + host + " success")
-
- def OnosEnvSetup(self, handle):
- """
- Onos Environment Setup function
- """
- self.Gensshkey(handle)
- self.home = self.GetEnvValue(handle, 'HOME')
- self.AddKnownHost(handle, self.OC1, "karaf", "karaf")
- self.AddKnownHost(handle, self.OC2, "karaf", "karaf")
- self.AddKnownHost(handle, self.OC3, "karaf", "karaf")
- self.DownLoadCode(handle,
- 'https://github.com/wuwenbin2/OnosSystemTest.git')
- # self.DownLoadCode(handle, 'https://gerrit.onosproject.org/onos')
- if self.masterusername == 'root':
- filepath = '/root/'
- else:
- filepath = '/home/' + self.masterusername + '/'
- self.OnosRootPathChange(filepath)
- self.CopyOnostoTestbin()
- self.ChangeOnosName(self.agentusername, self.agentpassword)
- self.InstallDefaultSoftware(handle)
- self.SetOnosEnvVar(handle, self.masterpassword, self.agentpassword)
diff --git a/testcases/Controllers/ONOS/Teston/adapters/foundation.py b/testcases/Controllers/ONOS/Teston/adapters/foundation.py
deleted file mode 100644
index 5c42c35e8..000000000
--- a/testcases/Controllers/ONOS/Teston/adapters/foundation.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""
-Description:
-    This file includes basic functions
- lanqinglong@huawei.com
-
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-"""
-
-import datetime
-import logging
-import os
-import re
-import time
-
-import functest.utils.functest_utils as ft_utils
-
-
-class foundation:
-
- def __init__(self):
-
- # currentpath = os.getcwd()
- REPO_PATH = ft_utils.FUNCTEST_REPO + '/'
- currentpath = REPO_PATH + 'testcases/Controllers/ONOS/Teston/CI'
- self.cipath = currentpath
- self.logdir = os.path.join(currentpath, 'log')
- self.workhome = currentpath[0: currentpath.rfind('testcases') - 1]
- self.Result_DB = ''
- filename = time.strftime('%Y-%m-%d-%H-%M-%S') + '.log'
- self.logfilepath = os.path.join(self.logdir, filename)
- self.starttime = datetime.datetime.now()
-
- def log(self, loginfo):
- """
- Record log in log directory for deploying test environment
- parameters:
- loginfo(input): record info
- """
- logging.basicConfig(level=logging.INFO,
- format='%(asctime)s %(filename)s:%(message)s',
- datefmt='%d %b %Y %H:%M:%S',
- filename=self.logfilepath,
- filemode='w')
- filelog = logging.FileHandler(self.logfilepath)
- logging.getLogger('Functest').addHandler(filelog)
- logging.info(loginfo)
-
- def getdefaultpara(self):
- """
- Get Default Parameters value
- """
- self.Result_DB = str(
- ft_utils.get_functest_config('results.test_db_url'))
- self.masterusername = str(
- ft_utils.get_functest_config('ONOS.general.onosbench_username'))
- self.masterpassword = str(
- ft_utils.get_functest_config('ONOS.general.onosbench_password'))
- self.agentusername = str(
- ft_utils.get_functest_config('ONOS.general.onoscli_username'))
- self.agentpassword = str(
- ft_utils.get_functest_config('ONOS.general.onoscli_password'))
- self.runtimeout = \
- ft_utils.get_functest_config('ONOS.general.runtimeout')
- self.OCT = str(ft_utils.get_functest_config('ONOS.environment.OCT'))
- self.OC1 = str(ft_utils.get_functest_config('ONOS.environment.OC1'))
- self.OC2 = str(ft_utils.get_functest_config('ONOS.environment.OC2'))
- self.OC3 = str(ft_utils.get_functest_config('ONOS.environment.OC3'))
- self.OCN = str(ft_utils.get_functest_config('ONOS.environment.OCN'))
- self.OCN2 = str(ft_utils.get_functest_config('ONOS.environment.OCN2'))
- self.installer_master = str(
- ft_utils.get_functest_config('ONOS.environment.installer_master'))
- self.installer_master_username = str(ft_utils.get_functest_config(
- 'ONOS.environment.installer_master_username'))
- self.installer_master_password = str(ft_utils.get_functest_config(
- 'ONOS.environment.installer_master_password'))
- self.hosts = [self.OC1, self.OCN, self.OCN2]
- self.localhost = self.OCT
-
- def GetResult(self):
- cmd = "cat " + self.logfilepath + " | grep Fail"
- Resultbuffer = os.popen(cmd).read()
- duration = datetime.datetime.now() - self.starttime
- time.sleep(2)
-
- if re.search("[1-9]+", Resultbuffer):
- self.log("Testcase Fails\n" + Resultbuffer)
- Result = "POK"
- else:
- self.log("Testcases Pass")
- Result = "OK"
- payload = {'timestart': str(self.starttime),
- 'duration': str(duration), 'status': Result}
-
- return payload
diff --git a/testcases/Controllers/ONOS/Teston/dependencies/onos b/testcases/Controllers/ONOS/Teston/dependencies/onos
deleted file mode 100644
index bb02fa899..000000000
--- a/testcases/Controllers/ONOS/Teston/dependencies/onos
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-# -----------------------------------------------------------------------------
-# ONOS remote command-line client.
-# -----------------------------------------------------------------------------
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
-. /root/.bashrc
-. $ONOS_ROOT/tools/build/envDefaults
-. $ONOS_ROOT/tools/test/bin/find-node.sh
-
-[ "$1" = "-w" ] && shift && onos-wait-for-start $1
-
-[ -n "$1" ] && OCI=$(find_node $1) && shift
-
-if which client 1>/dev/null 2>&1 && [ -z "$ONOS_USE_SSH" ]; then
- # Use Karaf client only if we can and are allowed to
- unset KARAF_HOME
- client -h $OCI -u karaf "$@" 2>/dev/null
-else
- # Otherwise use raw ssh; strict checking is off for dev environments only
- #ssh -p 8101 -o StrictHostKeyChecking=no $OCI "$@"
- sshpass -p karaf ssh -l karaf -p 8101 $OCI "$@"
-fi
diff --git a/testcases/Controllers/ONOS/Teston/log/gitignore b/testcases/Controllers/ONOS/Teston/log/gitignore
deleted file mode 100644
index e69de29bb..000000000
--- a/testcases/Controllers/ONOS/Teston/log/gitignore
+++ /dev/null
diff --git a/testcases/Controllers/ONOS/Teston/onosfunctest.py b/testcases/Controllers/ONOS/Teston/onosfunctest.py
deleted file mode 100755
index c8045fd12..000000000
--- a/testcases/Controllers/ONOS/Teston/onosfunctest.py
+++ /dev/null
@@ -1,270 +0,0 @@
-"""
-Description: This test runs the ONOS TestON VTN scripts
-
-List of test cases:
-CASE1 - Northbound NBI test network/subnet/ports
-CASE2 - OVSDB test, default configuration & VM going online
-
-lanqinglong@huawei.com
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-"""
-
-import datetime
-import os
-import re
-import time
-
-import argparse
-from neutronclient.v2_0 import client as neutronclient
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as openstack_utils
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-t", "--testcase", help="Testcase name")
-args = parser.parse_args()
-
-
-""" logging configuration """
-logger = ft_logger.Logger("onos").getLogger()
-
-# onos parameters
-TEST_DB = ft_utils.get_functest_config("results.test_db_url")
-ONOS_REPO_PATH = \
- ft_utils.get_functest_config("general.directories.dir_repos")
-ONOS_CONF_DIR = \
- ft_utils.get_functest_config("general.directories.dir_functest_conf")
-
-ONOSCI_PATH = ONOS_REPO_PATH + "/"
-starttime = datetime.datetime.now()
-
-HOME = os.environ['HOME'] + "/"
-INSTALLER_TYPE = os.environ['INSTALLER_TYPE']
-DEPLOY_SCENARIO = os.environ['DEPLOY_SCENARIO']
-ONOSCI_PATH = ONOS_REPO_PATH + "/"
-GLANCE_IMAGE_NAME = ft_utils.get_functest_config("onos_sfc.image_name")
-GLANCE_IMAGE_FILENAME = \
- ft_utils.get_functest_config("onos_sfc.image_file_name")
-GLANCE_IMAGE_PATH = \
- ft_utils.get_functest_config("general.directories.dir_functest_data") + \
- "/" + GLANCE_IMAGE_FILENAME
-SFC_PATH = ft_utils.FUNCTEST_REPO + "/" + \
- ft_utils.get_functest_config("general.directories.dir_onos_sfc")
-
-
-def RunScript(testname):
- """
- Run ONOS Test Script
- Parameters:
- testname: ONOS Testcase Name
- """
- runtest = ONOSCI_PATH + "onos/TestON/bin/cli.py run " + testname
- logger.debug("Run script " + testname)
- os.system(runtest)
-
-
-def DownloadCodes(url="https://github.com/wuwenbin2/OnosSystemTest.git"):
- """
-    Download the ONOS TestON code
- Parameters:
- url: github url
- """
- downloadcode = "git clone " + url + " " + ONOSCI_PATH + "OnosSystemTest"
- logger.debug("Download Onos Teston codes " + url)
- os.system(downloadcode)
-
-
-def GetResult():
- LOGPATH = ONOSCI_PATH + "onos/TestON/logs"
- cmd = "grep -rnh " + "Fail" + " " + LOGPATH
- Resultbuffer = os.popen(cmd).read()
- # duration = datetime.datetime.now() - starttime
- time.sleep(2)
-
- if re.search("\s+[1-9]+\s+", Resultbuffer):
- logger.debug("Testcase Fails\n" + Resultbuffer)
- # Result = "Failed"
- else:
- logger.debug("Testcases Success")
- # Result = "Success"
- # payload={'timestart': str(starttime),
- # 'duration': str(duration),
- # 'status': Result}
- cmd = "grep -rnh 'Execution Time' " + LOGPATH
- Resultbuffer = os.popen(cmd).read()
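-    # Fixed character offsets into the grep output; these assume the exact
-    # TestON log layout and break if the format changes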
- time1 = Resultbuffer[114:128]
- time2 = Resultbuffer[28:42]
- cmd = "grep -rnh 'Success Percentage' " + LOGPATH + "/FUNCvirNetNB_*"
- Resultbuffer = os.popen(cmd).read()
- if Resultbuffer.find('100%') >= 0:
- result1 = 'Success'
- else:
- result1 = 'Failed'
- cmd = "grep -rnh 'Success Percentage' " + LOGPATH + "/FUNCvirNetNBL3*"
- Resultbuffer = os.popen(cmd).read()
- if Resultbuffer.find('100%') >= 0:
- result2 = 'Success'
- else:
- result2 = 'Failed'
- status1 = []
- status2 = []
- cmd = "grep -rnh 'h3' " + LOGPATH + "/FUNCvirNetNB_*"
- Resultbuffer = os.popen(cmd).read()
- pattern = re.compile("<h3>([^-]+) - ([^-]+) - (\S*)</h3>")
- # res = pattern.search(Resultbuffer).groups()
- res = pattern.findall(Resultbuffer)
- i = 0
- for index in range(len(res)):
- status1.append({'Case name:': res[i][0] + res[i][1],
- 'Case result': res[i][2]})
- i = i + 1
- cmd = "grep -rnh 'h3' " + LOGPATH + "/FUNCvirNetNBL3*"
- Resultbuffer = os.popen(cmd).read()
- pattern = re.compile("<h3>([^-]+) - ([^-]+) - (\S*)</h3>")
- # res = pattern.search(Resultbuffer).groups()
- res = pattern.findall(Resultbuffer)
- i = 0
- for index in range(len(res)):
- status2.append({'Case name:': res[i][0] + res[i][1],
- 'Case result': res[i][2]})
- i = i + 1
- payload = {'timestart': str(starttime),
- 'FUNCvirNet': {'duration': time1,
- 'result': result1,
- 'status': status1},
- 'FUNCvirNetL3': {'duration': time2,
- 'result': result2,
- 'status': status2}}
- return payload
-
-
-def SetOnosIp():
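-    # Assumes the ONOS controller runs on the host behind the neutron
-    # publicURL taken from the service catalog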
- cmd = "openstack catalog show network | grep publicURL"
- cmd_output = os.popen(cmd).read()
- OC1 = re.search(r"\d+\.\d+\.\d+\.\d+", cmd_output).group()
- os.environ['OC1'] = OC1
- time.sleep(2)
- logger.debug("ONOS IP is " + OC1)
-
-
-def SetOnosIpForJoid():
- cmd = "env | grep SDN_CONTROLLER"
- cmd_output = os.popen(cmd).read()
- OC1 = re.search(r"\d+\.\d+\.\d+\.\d+", cmd_output).group()
- os.environ['OC1'] = OC1
- time.sleep(2)
- logger.debug("ONOS IP is " + OC1)
-
-
-def CleanOnosTest():
- TESTONPATH = ONOSCI_PATH + "onos/"
- cmd = "rm -rf " + TESTONPATH
- os.system(cmd)
- time.sleep(2)
- logger.debug("Clean ONOS Teston")
-
-
-def CreateImage():
- glance_client = openstack_utils.get_glance_client()
- image_id = openstack_utils.create_glance_image(glance_client,
- GLANCE_IMAGE_NAME,
- GLANCE_IMAGE_PATH)
- EXIT_CODE = -1
- if not image_id:
- logger.error("Failed to create a Glance image...")
- return(EXIT_CODE)
- logger.debug("Image '%s' with ID=%s created successfully."
- % (GLANCE_IMAGE_NAME, image_id))
-
-
-def SfcTest():
- cmd = "python " + SFC_PATH + "Sfc.py"
- logger.debug("Run sfc tests")
- os.system(cmd)
-
-
-def GetIp(service):
-    cmd = "openstack catalog show " + service + " | grep publicURL"
- cmd_output = os.popen(cmd).read()
- ip = re.search(r"\d+\.\d+\.\d+\.\d+", cmd_output).group()
- return ip
-
-
-def Replace(before, after):
- file = "Sfc_fun.py"
- cmd = "sed -i 's/" + before + "/" + after + "/g' " + SFC_PATH + file
- os.system(cmd)
-
-
-def SetSfcConf():
- Replace("keystone_ip", GetIp("keystone"))
- Replace("neutron_ip", GetIp("neutron"))
- Replace("nova_ip", GetIp("nova"))
- Replace("glance_ip", GetIp("glance"))
- pwd = os.environ['OS_PASSWORD']
- Replace("console", pwd)
- creds_neutron = openstack_utils.get_credentials("neutron")
- neutron_client = neutronclient.Client(**creds_neutron)
- ext_net = openstack_utils.get_external_net(neutron_client)
- Replace("admin_floating_net", ext_net)
- logger.info("Modify configuration for SFC")
-
-
-def OnosTest():
- start_time = time.time()
- stop_time = start_time
- if INSTALLER_TYPE == "joid":
- logger.debug("Installer is Joid")
- SetOnosIpForJoid()
- else:
- SetOnosIp()
- RunScript("FUNCvirNetNB")
- RunScript("FUNCvirNetNBL3")
-    # default, so the final status check still works if GetResult() raises
-    status = "FAIL"
-    try:
- logger.debug("Push ONOS results into DB")
- # TODO check path result for the file
- result = GetResult()
- stop_time = time.time()
-
- # ONOS success criteria = all tests OK
- # i.e. FUNCvirNet & FUNCvirNetL3
- status = "FAIL"
- try:
- if (result['FUNCvirNet']['result'] == "Success" and
- result['FUNCvirNetL3']['result'] == "Success"):
- status = "PASS"
- except:
- logger.error("Unable to set ONOS criteria")
-
- ft_utils.push_results_to_db("functest",
- "onos",
- start_time,
- stop_time,
- status,
- result)
-
- except:
- logger.error("Error pushing results into Database")
-
- if status == "FAIL":
- EXIT_CODE = -1
- exit(EXIT_CODE)
-
-
-def main():
-
- if args.testcase == "sfc":
- CreateImage()
- SetSfcConf()
- SfcTest()
- else:
- OnosTest()
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/Controllers/__init__.py b/testcases/Controllers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/testcases/Controllers/__init__.py
+++ /dev/null
diff --git a/testcases/OpenStack/examples/create_instance_and_ip.py b/testcases/OpenStack/examples/create_instance_and_ip.py
deleted file mode 100755
index 50cdf8a57..000000000
--- a/testcases/OpenStack/examples/create_instance_and_ip.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# This script boots an instance and assigns a floating ip
-#
-
-import argparse
-import os
-import sys
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("create_instance_and_ip").getLogger()
-
-HOME = os.environ['HOME'] + "/"
-
-VM_BOOT_TIMEOUT = 180
-
-INSTANCE_NAME = ft_utils.get_functest_config("example.example_vm_name")
-FLAVOR = ft_utils.get_functest_config("example.example_flavor")
-IMAGE_NAME = ft_utils.get_functest_config("example.example_image_name")
-IMAGE_FILENAME = \
- ft_utils.get_functest_config("general.openstack.image_file_name")
-IMAGE_FORMAT = \
- ft_utils.get_functest_config("general.openstack.image_disk_format")
-IMAGE_PATH = \
- ft_utils.get_functest_config("general.directories.dir_functest_data") + \
- "/" + IMAGE_FILENAME
-
-# NEUTRON Private Network parameters
-
-NET_NAME = ft_utils.get_functest_config("example.example_private_net_name")
-SUBNET_NAME = \
- ft_utils.get_functest_config("example.example_private_subnet_name")
-SUBNET_CIDR = \
- ft_utils.get_functest_config("example.example_private_subnet_cidr")
-ROUTER_NAME = ft_utils.get_functest_config("example.example_router_name")
-
-SECGROUP_NAME = ft_utils.get_functest_config("example.example_sg_name")
-SECGROUP_DESCR = ft_utils.get_functest_config("example.example_sg_descr")
-
-TEST_DB = ft_utils.get_functest_config("results.test_db_url")
-
-
-def main():
-
- nova_client = os_utils.get_nova_client()
- neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
-
- image_id = os_utils.create_glance_image(glance_client,
- IMAGE_NAME,
- IMAGE_PATH,
- disk=IMAGE_FORMAT,
- container="bare",
- public=True)
-
- network_dic = os_utils.create_network_full(neutron_client,
- NET_NAME,
- SUBNET_NAME,
- ROUTER_NAME,
- SUBNET_CIDR)
- if not network_dic:
- logger.error(
- "There has been a problem when creating the neutron network")
- sys.exit(-1)
-
- network_id = network_dic["net_id"]
-
- sg_id = os_utils.create_security_group_full(neutron_client,
- SECGROUP_NAME, SECGROUP_DESCR)
-
-    # boot INSTANCE
- logger.info("Creating instance '%s'..." % INSTANCE_NAME)
- logger.debug(
- "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
- "network=%s \n" % (INSTANCE_NAME, FLAVOR, image_id, network_id))
- instance = os_utils.create_instance_and_wait_for_active(FLAVOR,
- image_id,
- network_id,
- INSTANCE_NAME)
-
- if instance is None:
- logger.error("Error while booting instance.")
- sys.exit(-1)
- # Retrieve IP of INSTANCE
- instance_ip = instance.networks.get(NET_NAME)[0]
- logger.debug("Instance '%s' got private ip '%s'." %
- (INSTANCE_NAME, instance_ip))
-
- logger.info("Adding '%s' to security group '%s'..."
- % (INSTANCE_NAME, SECGROUP_NAME))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
-
- logger.info("Creating floating IP for VM '%s'..." % INSTANCE_NAME)
- floatip_dic = os_utils.create_floating_ip(neutron_client)
- floatip = floatip_dic['fip_addr']
- # floatip_id = floatip_dic['fip_id']
-
- if floatip is None:
- logger.error("Cannot create floating IP.")
- sys.exit(-1)
- logger.info("Floating IP created: '%s'" % floatip)
-
- logger.info("Associating floating ip: '%s' to VM '%s' "
- % (floatip, INSTANCE_NAME))
- if not os_utils.add_floating_ip(nova_client, instance.id, floatip):
- logger.error("Cannot associate floating IP to VM.")
- sys.exit(-1)
-
- sys.exit(0)
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/OpenStack/healthcheck/healthcheck.sh b/testcases/OpenStack/healthcheck/healthcheck.sh
deleted file mode 100755
index 996aadcf7..000000000
--- a/testcases/OpenStack/healthcheck/healthcheck.sh
+++ /dev/null
@@ -1,261 +0,0 @@
-#
-# OpenStack Health Check
-# This script is meant for really basic API operations on OpenStack
-# Services tested: Keystone, Glance, Cinder, Neutron, Nova
-#
-#
-# Author:
-# jose.lausuch@ericsson.com
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-set -e
-
-# Redirect all output (stdout) to a log file and show only possible errors.
-LOG_FILE=/home/opnfv/functest/results/healthcheck.log
-YAML_FILE=${CONFIG_FUNCTEST_YAML}
-echo "">$LOG_FILE
-exec 1<>$LOG_FILE
-
-info () {
- echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - INFO - " "$*" | tee -a $LOG_FILE 1>&2
-}
-
-debug () {
- if [[ "${CI_DEBUG,,}" == "true" ]]; then
- echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - DEBUG - " "$*" | tee -a $LOG_FILE 1>&2
- fi
-}
-
-error () {
- echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - ERROR - " "$*" | tee -a $LOG_FILE 1>&2
- exit 1
-}
-
-if [ -z $OS_AUTH_URL ]; then
- echo "Source credentials first."
- exit 1
-fi
-
-
-echo "Using following credentials:"
-env | grep OS
-
-## Variables:
-project_1="opnfv-tenant1"
-project_2="opnfv-tenant2"
-user_1="opnfv_user1"
-user_2="opnfv_user2"
-user_3="opnfv_user3"
-user_4="opnfv_user4"
-user_5="opnfv_user5"
-user_6="opnfv_user6"
-kernel_image="opnfv-kernel-img"
-ramdisk_image="opnfv-ramdisk-img"
-image_1="opnfv-image1"
-image_2="opnfv-image2"
-volume_1="opnfv-volume1"
-volume_2="opnfv-volume2"
-net_1="opnfv-network1"
-net_2="opnfv-network2"
-subnet_1="opnfv-subnet1"
-subnet_2="opnfv-subnet2"
-port_1="opnfv-port1"
-port_2="opnfv-port2"
-router_1="opnfv-router1"
-router_2="opnfv-router2"
-flavor="m1.tiny"
-instance_1="opnfv-instance1"
-instance_2="opnfv-instance2"
-instance_3="opnfv-instance3"
-instance_4="opnfv-instance4"
-
-
-
-function wait_for_ip() {
- # $1 is the instance name
- # $2 is the first octet of the subnet ip
- timeout=60
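-    # Poll the instance console log for up to 60s, looking for a DHCP
-    # lease message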
-    while [[ ${timeout} -gt 0 ]]; do
-        # error() and debug() already tee to $LOG_FILE; don't pipe them,
-        # or error()'s exit would be swallowed by the pipeline subshell
-        if [[ $(nova console-log $1|grep "No lease, failing") ]]; then
-            error "The instance $1 couldn't get an IP from the DHCP agent."
-        elif [[ $(nova console-log $1|grep "^Lease"|grep "obtained") ]]; then
-            debug "The instance $1 got an IP successfully from the DHCP agent."
- break
- fi
- let timeout=timeout-1
- sleep 1
- done
-}
-
-
-#################################
-info "Testing Keystone API..." | tee -a $LOG_FILE 1>&2
-#################################
-openstack project create ${project_1}
-debug "project '${project_1}' created."
-openstack project create ${project_2}
-debug "project '${project_2}' created."
-openstack user create ${user_1} --project ${project_1}
-debug "user '${user_1}' created in project ${project_1}."
-openstack user create ${user_2} --project ${project_1}
-debug "user '${user_2}' created in project ${project_1}."
-openstack user create ${user_3} --project ${project_1}
-debug "user '${user_3}' created in project ${project_1}."
-openstack user create ${user_4} --project ${project_2}
-debug "user '${user_4}' created in project ${project_2}."
-openstack user create ${user_5} --project ${project_2}
-debug "user '${user_5}' created in project ${project_2}."
-openstack user create ${user_6} --project ${project_2}
-debug "user '${user_6}' created in project ${project_2}."
-info "...Keystone OK!"
-
-#################################
-info "Testing Glance API..."
-#################################
-disk_img=$(cat ${YAML_FILE} | shyaml get-value healthcheck.disk_image 2> /dev/null || true)
-disk_format=$(cat ${YAML_FILE} | shyaml get-value healthcheck.disk_format 2> /dev/null || true)
-kernel_img=$(cat ${YAML_FILE} | shyaml get-value healthcheck.kernel_image 2> /dev/null || true)
-ramdisk_img=$(cat ${YAML_FILE} | shyaml get-value healthcheck.ramdisk_image 2> /dev/null || true)
-extra_properties=$(cat ${YAML_FILE} | shyaml get-value healthcheck.extra_properties 2> /dev/null || true)
-
-# Test if we need to create a 3part image
-if [ "X$kernel_img" != "X" ]
-then
- img_id=$(glance image-create --name ${kernel_image} --disk-format aki \
- --container-format bare < ${kernel_img} | awk '$2 == "id" { print $4 }')
- extra_opts="--property kernel_id=${img_id}"
-
- if [ "X$ramdisk_img" != "X" ]
- then
- img_id=$(glance image-create --name ${ramdisk_image} --disk-format ari \
- --container-format bare < ${ramdisk_img} | awk '$2 == "id" { print $4 }')
- extra_opts="$extra_opts --property ramdisk_id=${img_id}"
- fi
-fi
-
-if [ "X$extra_properties" != "X" ]
-then
- keys=$(cat ${YAML_FILE} | shyaml keys healthcheck.extra_properties)
- for key in ${keys}
- do
- value=$(cat ${YAML_FILE} | shyaml get-value healthcheck.extra_properties.${key})
- extra_opts="$extra_opts --property ${key}=\"${value}\""
- done
-fi
-
-debug "image extra_properties=${extra_properties}"
-
-eval glance image-create --name ${image_1} --disk-format ${disk_format} --container-format bare \
- ${extra_opts} < ${disk_img}
-debug "image '${image_1}' created."
-eval glance image-create --name ${image_2} --disk-format ${disk_format} --container-format bare \
- ${extra_opts} < ${disk_img}
-debug "image '${image_2}' created."
-info "... Glance OK!"
-
-#################################
-info "Testing Cinder API..."
-#################################
-cinder create --display_name ${volume_1} 1
-debug "volume '${volume_1}' created."
-cinder create --display_name ${volume_2} 10
-debug "volume '${volume_2}' created."
-info "...Cinder OK!"
-
-#################################
-info "Testing Neutron API..."
-#################################
-
-network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
-for id in ${network_ids[@]}; do
- [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && ext_net_id=${id}
-done
-if [[ "${ext_net_id}" == "" ]]; then
- error "No external network found. Exiting Health Check..."
- exit 1
-else
- info "External network found. ${ext_net_id}"
-fi
-
-info "1. Create Networks..."
-neutron net-create ${net_1}
-debug "net '${net_1}' created."
-neutron net-create ${net_2}
-debug "net '${net_2}' created."
-net1_id=$(neutron net-list | grep ${net_1} | awk '{print $2}')
-net2_id=$(neutron net-list | grep ${net_2} | awk '{print $2}')
-
-info "2. Create subnets..."
-neutron subnet-create --name ${subnet_1} --allocation-pool start=10.6.0.2,end=10.6.0.253 --gateway 10.6.0.254 ${net_1} 10.6.0.0/24
-debug "subnet '${subnet_1}' created."
-neutron subnet-create --name ${subnet_2} --allocation-pool start=10.7.0.2,end=10.7.0.253 --gateway 10.7.0.254 ${net_2} 10.7.0.0/24
-debug "subnet '${subnet_2}' created."
-
-info "3. Create Routers..."
-neutron router-create ${router_1}
-debug "router '${router_1}' created."
-neutron router-create ${router_2}
-debug "router '${router_2}' created."
-
-neutron router-gateway-set ${router_1} ${ext_net_id}
-debug "router '${router_1}' gateway set to ${ext_net_id}."
-neutron router-gateway-set ${router_2} ${ext_net_id}
-debug "router '${router_2}' gateway set to ${ext_net_id}."
-
-neutron router-interface-add ${router_1} ${subnet_1}
-debug "router '${router_1}' interface added ${subnet_1}."
-neutron router-interface-add ${router_2} ${subnet_2}
-debug "router '${router_2}' interface added ${subnet_2}."
-
-info "...Neutron OK!"
-
-#################################
-info "Testing Nova API..."
-#################################
-
-# This delay should be removed after resolving Jira case APEX-149.
-# The purpose is to give some time to populate openflow rules
-# by SDN controller in case of odl_l2 scenario.
-sleep 60
-
-
-# Check if flavor exists
-if [[ -z $(nova flavor-list|grep $flavor) ]]; then
- # if given flavor doesn't exist, we create one
- debug "Flavor $flavor doesn't exist. Creating a new flavor."
-    nova flavor-create ${flavor} auto 512 1 1 --is-public True
-fi
-debug "Using flavor $flavor to boot the instances."
-
-
-nova boot --flavor ${flavor} --image ${image_1} --nic net-id=${net1_id} ${instance_1}
-debug "nova instance '${instance_1}' booted on ${net_1}."
-nova boot --flavor ${flavor} --image ${image_1} --nic net-id=${net1_id} ${instance_2}
-debug "nova instance '${instance_2}' booted on ${net_1}."
-nova boot --flavor ${flavor} --image ${image_2} --nic net-id=${net2_id} ${instance_3}
-debug "nova instance '${instance_3}' booted on ${net_2}."
-nova boot --flavor ${flavor} --image ${image_2} --nic net-id=${net2_id} ${instance_4}
-debug "nova instance '${instance_4}' booted on ${net_2}."
-
-vm1_id=$(nova list | grep ${instance_1} | awk '{print $2}')
-vm2_id=$(nova list | grep ${instance_2} | awk '{print $2}')
-vm3_id=$(nova list | grep ${instance_3} | awk '{print $2}')
-vm4_id=$(nova list | grep ${instance_4} | awk '{print $2}')
-info "...Nova OK!"
-
-info "Checking if instances get an IP from DHCP..."
-wait_for_ip ${instance_1} "10.6"
-wait_for_ip ${instance_2} "10.6"
-wait_for_ip ${instance_3} "10.7"
-wait_for_ip ${instance_4} "10.7"
-info "...DHCP OK!"
-
-info "Health check passed!"
-exit 0
diff --git a/testcases/OpenStack/rally/blacklist.txt b/testcases/OpenStack/rally/blacklist.txt
deleted file mode 100644
index 3a17fa616..000000000
--- a/testcases/OpenStack/rally/blacklist.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-scenario:
- -
- scenarios:
- - os-nosdn-lxd-ha
- - os-nosdn-lxd-noha
- installers:
- - joid
- tests:
- - NovaServers.boot_server_from_volume_and_delete
-
-functionality:
- -
- functions:
- - no_live_migration
- tests:
- - NovaServers.boot_and_live_migrate_server
- - NovaServers.boot_server_attach_created_volume_and_live_migrate
- - NovaServers.boot_server_from_volume_and_live_migrate
diff --git a/testcases/OpenStack/rally/macro/macro.yaml b/testcases/OpenStack/rally/macro/macro.yaml
deleted file mode 100644
index 48c0333e9..000000000
--- a/testcases/OpenStack/rally/macro/macro.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-{%- macro user_context(tenants,users_per_tenant, use_existing_users) -%}
-{%- if use_existing_users and caller is not defined -%} {}
-{%- else %}
- {%- if not use_existing_users %}
- users:
- tenants: {{ tenants }}
- users_per_tenant: {{ users_per_tenant }}
- {%- endif %}
- {%- if caller is defined %}
- {{ caller() }}
- {%- endif %}
-{%- endif %}
-{%- endmacro %}
-
-{%- macro vm_params(image=none, flavor=none, size=none) %}
-{%- if flavor is not none %}
- flavor:
- name: {{ flavor }}
-{%- endif %}
-{%- if image is not none %}
- image:
- name: {{ image }}
-{%- endif %}
-{%- if size is not none %}
- size: {{ size }}
-{%- endif %}
-{%- endmacro %}
-
-{%- macro unlimited_volumes() %}
- cinder:
- gigabytes: -1
- snapshots: -1
- volumes: -1
-{%- endmacro %}
-
-{%- macro constant_runner(concurrency=1, times=1, is_smoke=True) %}
- type: "constant"
- {%- if is_smoke %}
- concurrency: 1
- times: 1
- {%- else %}
- concurrency: {{ concurrency }}
- times: {{ times }}
- {%- endif %}
-{%- endmacro %}
-
-{%- macro rps_runner(rps=1, times=1, is_smoke=True) %}
- type: rps
- {%- if is_smoke %}
- rps: 1
- times: 1
- {%- else %}
- rps: {{ rps }}
- times: {{ times }}
- {%- endif %}
-{%- endmacro %}
-
-{%- macro no_failures_sla() %}
- failure_rate:
- max: 0
-{%- endmacro %}
-
-{%- macro volumes(size=1, volumes_per_tenant=1) %}
- volumes:
- size: {{ size }}
- volumes_per_tenant: {{ volumes_per_tenant }}
-{%- endmacro %}
-
-{%- macro unlimited_nova(keypairs=false) %}
- nova:
- cores: -1
- floating_ips: -1
- instances: -1
- {%- if keypairs %}
- key_pairs: -1
- {%- endif %}
- ram: -1
- security_group_rules: -1
- security_groups: -1
-{%- endmacro %}
-
-{%- macro unlimited_neutron(secgroups=false) %}
- neutron:
- network: -1
- port: -1
- subnet: -1
- {%- if secgroups %}
- security_group: -1
- security_group_rule: -1
- {%- endif %}
-{%- endmacro %}
-
-{%- macro glance_args(location, container="bare", type="qcow2") %}
- container_format: {{ container }}
- disk_format: {{ type }}
- image_location: {{ location }}
-{%- endmacro %}
diff --git a/testcases/OpenStack/rally/run_rally-cert.py b/testcases/OpenStack/rally/run_rally-cert.py
deleted file mode 100755
index 8b8adce40..000000000
--- a/testcases/OpenStack/rally/run_rally-cert.py
+++ /dev/null
@@ -1,625 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2015 Orange
-# guyrodrigue.koffi@orange.com
-# morgan.richomme@orange.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# 0.1 (05/2015) initial commit
-# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
-# 0.3 (19/10/2015) remove Tempest from run_rally
-# and push result into test DB
-#
-""" tests configuration """
-
-import json
-import os
-import re
-import subprocess
-import time
-
-import argparse
-import iniparse
-import yaml
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-
-tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
- 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
-parser = argparse.ArgumentParser()
-parser.add_argument("test_name",
- help="Module name to be tested. "
- "Possible values are : "
- "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
- "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
- "{d[10]} ] "
- "The 'all' value "
- "performs all possible test scenarios"
- .format(d=tests))
-
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-parser.add_argument("-s", "--smoke",
- help="Smoke test mode",
- action="store_true")
-parser.add_argument("-v", "--verbose",
- help="Print verbose info about the progress",
- action="store_true")
-parser.add_argument("-n", "--noclean",
- help="Don't clean the created resources for this test.",
- action="store_true")
-parser.add_argument("-z", "--sanity",
- help="Sanity test mode, execute only a subset of tests",
- action="store_true")
-
-args = parser.parse_args()
-
-network_dict = {}
-
-if args.verbose:
- RALLY_STDERR = subprocess.STDOUT
-else:
- RALLY_STDERR = open(os.devnull, 'w')
-
-""" logging configuration """
-logger = ft_logger.Logger("run_rally").getLogger()
-
-
-HOME = os.environ['HOME'] + "/"
-RALLY_DIR = ft_utils.FUNCTEST_REPO + '/' + \
- ft_utils.get_functest_config('general.directories.dir_rally')
-SANITY_MODE_DIR = RALLY_DIR + "scenario/sanity"
-FULL_MODE_DIR = RALLY_DIR + "scenario/full"
-TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
-SUPPORT_DIR = RALLY_DIR + "scenario/support"
-TEMP_DIR = RALLY_DIR + "var"
-BLACKLIST_FILE = RALLY_DIR + "blacklist.txt"
-
-FLAVOR_NAME = "m1.tiny"
-USERS_AMOUNT = 2
-TENANTS_AMOUNT = 3
-ITERATIONS_AMOUNT = 10
-CONCURRENCY = 4
-
-RESULTS_DIR = \
- ft_utils.get_functest_config('general.directories.dir_rally_res')
-TEMPEST_CONF_FILE = \
- ft_utils.get_functest_config('general.directories.dir_results') + \
- '/tempest/tempest.conf'
-TEST_DB = ft_utils.get_functest_config('results.test_db_url')
-
-PRIVATE_NET_NAME = ft_utils.get_functest_config('rally.network_name')
-PRIVATE_SUBNET_NAME = ft_utils.get_functest_config('rally.subnet_name')
-PRIVATE_SUBNET_CIDR = ft_utils.get_functest_config('rally.subnet_cidr')
-ROUTER_NAME = ft_utils.get_functest_config('rally.router_name')
-
-GLANCE_IMAGE_NAME = \
- ft_utils.get_functest_config('general.openstack.image_name')
-GLANCE_IMAGE_FILENAME = \
- ft_utils.get_functest_config('general.openstack.image_file_name')
-GLANCE_IMAGE_FORMAT = \
- ft_utils.get_functest_config('general.openstack.image_disk_format')
-GLANCE_IMAGE_PATH = \
- ft_utils.get_functest_config('general.directories.dir_functest_data') + \
- "/" + GLANCE_IMAGE_FILENAME
-
-CINDER_VOLUME_TYPE_NAME = "volume_test"
-
-
-SUMMARY = []
-neutron_client = None
-
-
-def get_task_id(cmd_raw):
- """
- get task id from command rally result
- :param cmd_raw:
- :return: task_id as string
- """
- taskid_re = re.compile('^Task +(.*): started$')
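-    # rally prints a line like "Task <uuid>: started"; capture the uuid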
- for line in cmd_raw.splitlines(True):
- line = line.strip()
- match = taskid_re.match(line)
- if match:
- return match.group(1)
- return None
-
-
-def task_succeed(json_raw):
- """
- Parse JSON from rally JSON results
- :param json_raw:
- :return: Bool
- """
- rally_report = json.loads(json_raw)
- for report in rally_report:
- if report is None or report.get('result') is None:
- return False
-
- for result in report.get('result'):
- if result is None or len(result.get('error')) > 0:
- return False
-
- return True
-
-
-def live_migration_supported():
- config = iniparse.ConfigParser()
- if (config.read(TEMPEST_CONF_FILE) and
- config.has_section('compute-feature-enabled') and
- config.has_option('compute-feature-enabled', 'live_migration')):
- return config.getboolean('compute-feature-enabled', 'live_migration')
-
- return False
-
-
-def build_task_args(test_file_name):
- task_args = {'service_list': [test_file_name]}
- task_args['image_name'] = GLANCE_IMAGE_NAME
- task_args['flavor_name'] = FLAVOR_NAME
- task_args['glance_image_location'] = GLANCE_IMAGE_PATH
- task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
- task_args['tmpl_dir'] = TEMPLATE_DIR
- task_args['sup_dir'] = SUPPORT_DIR
- task_args['users_amount'] = USERS_AMOUNT
- task_args['tenants_amount'] = TENANTS_AMOUNT
- task_args['use_existing_users'] = False
- task_args['iterations'] = ITERATIONS_AMOUNT
- task_args['concurrency'] = CONCURRENCY
-
- if args.sanity:
- task_args['smoke'] = True
- else:
- task_args['smoke'] = args.smoke
-
- ext_net = os_utils.get_external_net(neutron_client)
- if ext_net:
- task_args['floating_network'] = str(ext_net)
- else:
- task_args['floating_network'] = ''
-
- net_id = network_dict['net_id']
- task_args['netid'] = str(net_id)
-
- auth_url = os.getenv('OS_AUTH_URL')
- if auth_url is not None:
- task_args['request_url'] = auth_url.rsplit(":", 1)[0]
- else:
- task_args['request_url'] = ''
-
- return task_args
-
-
-def get_output(proc, test_name):
- global SUMMARY
- result = ""
- nb_tests = 0
- overall_duration = 0.0
- success = 0.0
- nb_totals = 0
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- if args.verbose:
- result += line
- else:
- if ("Load duration" in line or
- "started" in line or
- "finished" in line or
- " Preparing" in line or
- "+-" in line or
- "|" in line):
- result += line
- elif "test scenario" in line:
- result += "\n" + line
- elif "Full duration" in line:
- result += line + "\n\n"
-
- # parse output for summary report
- if ("| " in line and
- "| action" not in line and
- "| Starting" not in line and
- "| Completed" not in line and
- "| ITER" not in line and
- "| " not in line and
- "| total" not in line):
- nb_tests += 1
- elif "| total" in line:
- percentage = ((line.split('|')[8]).strip(' ')).strip('%')
- try:
- success += float(percentage)
- except ValueError:
- logger.info('Percentage error: %s, %s' % (percentage, line))
- nb_totals += 1
- elif "Full duration" in line:
- duration = line.split(': ')[1]
- try:
- overall_duration += float(duration)
- except ValueError:
- logger.info('Duration error: %s, %s' % (duration, line))
-
- overall_duration = "{:10.2f}".format(overall_duration)
- if nb_totals == 0:
- success_avg = 0
- else:
- success_avg = "{:0.2f}".format(success / nb_totals)
-
- scenario_summary = {'test_name': test_name,
- 'overall_duration': overall_duration,
- 'nb_tests': nb_tests,
- 'success': success_avg}
- SUMMARY.append(scenario_summary)
-
- logger.debug("\n" + result)
-
- return result
-
-
-def get_cmd_output(proc):
- result = ""
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- result += line
-
- return result
-
-
-def excl_scenario():
- black_tests = []
-
- try:
- with open(BLACKLIST_FILE, 'r') as black_list_file:
- black_list_yaml = yaml.safe_load(black_list_file)
-
- installer_type = os.getenv('INSTALLER_TYPE')
- deploy_scenario = os.getenv('DEPLOY_SCENARIO')
-        if installer_type and deploy_scenario:
- if 'scenario' in black_list_yaml.keys():
- for item in black_list_yaml['scenario']:
- scenarios = item['scenarios']
- installers = item['installers']
- if (deploy_scenario in scenarios and
- installer_type in installers):
- tests = item['tests']
- black_tests.extend(tests)
- except:
- logger.debug("Scenario exclusion not applied.")
-
- return black_tests
-
-
-def excl_func():
- black_tests = []
- func_list = []
-
- try:
- with open(BLACKLIST_FILE, 'r') as black_list_file:
- black_list_yaml = yaml.safe_load(black_list_file)
-
- if not live_migration_supported():
- func_list.append("no_live_migration")
-
- if 'functionality' in black_list_yaml.keys():
- for item in black_list_yaml['functionality']:
- functions = item['functions']
- for func in func_list:
- if func in functions:
- tests = item['tests']
- black_tests.extend(tests)
- except:
- logger.debug("Functionality exclusion not applied.")
-
- return black_tests
-
-
-def apply_blacklist(case_file_name, result_file_name):
- logger.debug("Applying blacklist...")
- cases_file = open(case_file_name, 'r')
- result_file = open(result_file_name, 'w')
-
- black_tests = list(set(excl_func() + excl_scenario()))
-
- include = True
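-    # Scenario blocks in the case file are separated by blank lines; once a
-    # line matches a blacklisted test, skip lines until the next blank line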
- for cases_line in cases_file:
- if include:
- for black_tests_line in black_tests:
- if re.search(black_tests_line, cases_line.strip().rstrip(':')):
- include = False
- break
- else:
- result_file.write(str(cases_line))
- else:
- if cases_line.isspace():
- include = True
-
- cases_file.close()
- result_file.close()
-
-
-def prepare_test_list(test_name):
- scenario_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
- test_name)
- if not os.path.exists(scenario_file_name):
- if args.sanity:
- scenario_file_name = '{}opnfv-{}.yaml'.format(SANITY_MODE_DIR +
- "/", test_name)
- else:
- scenario_file_name = '{}opnfv-{}.yaml'.format(FULL_MODE_DIR +
- "/", test_name)
- if not os.path.exists(scenario_file_name):
- logger.info("The scenario '%s' does not exist."
- % scenario_file_name)
- exit(-1)
-
- logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
- test_file_name = '{}opnfv-{}.yaml'.format(TEMP_DIR + "/", test_name)
-
- if not os.path.exists(TEMP_DIR):
- os.makedirs(TEMP_DIR)
-
- apply_blacklist(scenario_file_name, test_file_name)
- return test_file_name
-
-
-def file_is_empty(file_name):
- try:
- if os.stat(file_name).st_size > 0:
- return False
- except:
- pass
-
- return True
-
-
-def run_task(test_name):
- #
- # the "main" function of the script who launch rally for a task
- # :param test_name: name for the rally test
- # :return: void
- #
- global SUMMARY
- logger.info('Starting test scenario "{}" ...'.format(test_name))
- start_time = time.time()
-
- task_file = '{}task.yaml'.format(RALLY_DIR)
- if not os.path.exists(task_file):
- logger.error("Task file '%s' does not exist." % task_file)
- exit(-1)
-
- file_name = prepare_test_list(test_name)
- if file_is_empty(file_name):
- logger.info('No tests for scenario "{}"'.format(test_name))
- return
-
- cmd_line = ("rally task start --abort-on-sla-failure " +
- "--task {} ".format(task_file) +
- "--task-args \"{}\" ".format(build_task_args(test_name)))
- logger.debug('running command line : {}'.format(cmd_line))
-
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=RALLY_STDERR, shell=True)
- output = get_output(p, test_name)
- task_id = get_task_id(output)
- logger.debug('task_id : {}'.format(task_id))
-
- if task_id is None:
- logger.error('Failed to retrieve task_id, validating task...')
- cmd_line = ("rally task validate " +
- "--task {} ".format(task_file) +
- "--task-args \"{}\" ".format(build_task_args(test_name)))
- logger.debug('running command line : {}'.format(cmd_line))
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, shell=True)
- output = get_cmd_output(p)
- logger.error("Task validation result:" + "\n" + output)
- return
-
- # check for result directory and create it otherwise
- if not os.path.exists(RESULTS_DIR):
- logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
- os.makedirs(RESULTS_DIR)
-
- # write html report file
- report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
- cmd_line = "rally task report {} --out {}".format(task_id,
- report_file_name)
-
- logger.debug('running command line : {}'.format(cmd_line))
- os.popen(cmd_line)
-
- # get and save rally operation JSON result
- cmd_line = "rally task results %s" % task_id
- logger.debug('running command line : {}'.format(cmd_line))
- cmd = os.popen(cmd_line)
- json_results = cmd.read()
- with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
- logger.debug('saving json file')
- f.write(json_results)
-
- with open('{}opnfv-{}.json'
- .format(RESULTS_DIR, test_name)) as json_file:
- json_data = json.load(json_file)
-
- """ parse JSON operation result """
- status = "FAIL"
- if task_succeed(json_results):
- logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
- status = "PASS"
- else:
- logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
-
- # Push results in payload of testcase
- if args.report:
- stop_time = time.time()
- logger.debug("Push Rally detailed results into DB")
- ft_utils.push_results_to_db("functest",
- "Rally_details",
- start_time,
- stop_time,
- status,
- json_data)
-
-
-def main():
- global SUMMARY
- global network_dict
- global neutron_client
-
- nova_client = os_utils.get_nova_client()
- neutron_client = os_utils.get_neutron_client()
- cinder_client = os_utils.get_cinder_client()
-
- start_time = time.time()
-
- # configure script
- if not (args.test_name in tests):
- logger.error('argument not valid')
- exit(-1)
-
- SUMMARY = []
-
- volume_types = os_utils.list_volume_types(cinder_client,
- private=False)
- if not volume_types:
- volume_type = os_utils.create_volume_type(
- cinder_client, CINDER_VOLUME_TYPE_NAME)
- if not volume_type:
- logger.error("Failed to create volume type...")
- exit(-1)
- else:
- logger.debug("Volume type '%s' created succesfully..."
- % CINDER_VOLUME_TYPE_NAME)
- else:
- logger.debug("Using existing volume type(s)...")
-
- image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
- GLANCE_IMAGE_PATH,
- GLANCE_IMAGE_FORMAT)
- if not image_id:
- exit(-1)
-
- logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
- network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
- PRIVATE_SUBNET_NAME,
- ROUTER_NAME,
- PRIVATE_SUBNET_CIDR)
- if not network_dict:
- exit(1)
-
- if args.test_name == "all":
- for test_name in tests:
- if not (test_name == 'all' or
- test_name == 'vm'):
- run_task(test_name)
- else:
- logger.debug("Test name: " + args.test_name)
- run_task(args.test_name)
-
- report = ("\n"
- " "
- "\n"
- " Rally Summary Report\n"
- "\n"
- "+===================+============+===============+===========+"
- "\n"
- "| Module | Duration | nb. Test Run | Success |"
- "\n"
- "+===================+============+===============+===========+"
- "\n")
- payload = []
- stop_time = time.time()
-
- # for each scenario we draw a row for the table
- total_duration = 0.0
- total_nb_tests = 0
- total_success = 0.0
- for s in SUMMARY:
- name = "{0:<17}".format(s['test_name'])
- duration = float(s['overall_duration'])
- total_duration += duration
- duration = time.strftime("%M:%S", time.gmtime(duration))
- duration = "{0:<10}".format(duration)
- nb_tests = "{0:<13}".format(s['nb_tests'])
- total_nb_tests += int(s['nb_tests'])
- success = "{0:<10}".format(str(s['success']) + '%')
- total_success += float(s['success'])
- report += ("" +
- "| " + name + " | " + duration + " | " +
- nb_tests + " | " + success + "|\n" +
- "+-------------------+------------"
- "+---------------+-----------+\n")
- payload.append({'module': name,
- 'details': {'duration': s['overall_duration'],
- 'nb tests': s['nb_tests'],
- 'success': s['success']}})
-
- total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
- total_duration_str2 = "{0:<10}".format(total_duration_str)
- total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-
- if len(SUMMARY):
- success_rate = total_success / len(SUMMARY)
- else:
- success_rate = 100
- success_rate = "{:0.2f}".format(success_rate)
- success_rate_str = "{0:<10}".format(str(success_rate) + '%')
- report += "+===================+============+===============+===========+"
- report += "\n"
- report += ("| TOTAL: | " + total_duration_str2 + " | " +
- total_nb_tests_str + " | " + success_rate_str + "|\n")
- report += "+===================+============+===============+===========+"
- report += "\n"
-
- logger.info("\n" + report)
- payload.append({'summary': {'duration': total_duration,
- 'nb tests': total_nb_tests,
- 'nb success': success_rate}})
-
- if args.sanity:
- case_name = "rally_sanity"
- else:
- case_name = "rally_full"
-
- # Evaluation of the success criteria
- status = ft_utils.check_success_rate(case_name, success_rate)
-
- exit_code = -1
- if status == "PASS":
- exit_code = 0
-
- if args.report:
- logger.debug("Pushing Rally summary into DB...")
- ft_utils.push_results_to_db("functest",
- case_name,
- start_time,
- stop_time,
- status,
- payload)
- if args.noclean:
- exit(exit_code)
-
- if not image_exists:
- logger.debug("Deleting image '%s' with ID '%s'..."
- % (GLANCE_IMAGE_NAME, image_id))
- if not os_utils.delete_glance_image(nova_client, image_id):
- logger.error("Error deleting the glance image")
-
- if not volume_types:
- logger.debug("Deleting volume type '%s'..."
- % CINDER_VOLUME_TYPE_NAME)
- if not os_utils.delete_volume_type(cinder_client, volume_type):
- logger.error("Error in deleting volume type...")
-
- exit(exit_code)
-
-
-if __name__ == '__main__':
- main()
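The summary loop above averages the per-module success percentages directly, so a module that ran 2 tests weighs as much as one that ran 50. A minimal sketch of that aggregation, with illustrative SUMMARY entries:

# Minimal sketch of the aggregation performed in main(); the SUMMARY
# entries are illustrative, not real results.
SUMMARY = [
    {'test_name': 'authenticate', 'overall_duration': 12.3,
     'nb_tests': 6, 'success': 100.0},
    {'test_name': 'neutron', 'overall_duration': 240.0,
     'nb_tests': 8, 'success': 75.0},
]

total_duration = sum(float(s['overall_duration']) for s in SUMMARY)
total_nb_tests = sum(int(s['nb_tests']) for s in SUMMARY)
# Unweighted mean of the module percentages; 100 when nothing ran.
success_rate = (sum(float(s['success']) for s in SUMMARY) / len(SUMMARY)
                if SUMMARY else 100)
print("%.2f%%" % success_rate)  # 87.50%, whereas a per-test weighting
                                # would give (6*100 + 8*75) / 14 = 85.71%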
diff --git a/testcases/OpenStack/rally/scenario/full/opnfv-cinder.yaml b/testcases/OpenStack/rally/scenario/full/opnfv-cinder.yaml
deleted file mode 100644
index e844e33f6..000000000
--- a/testcases/OpenStack/rally/scenario/full/opnfv-cinder.yaml
+++ /dev/null
@@ -1,266 +0,0 @@
- CinderVolumes.create_and_attach_volume:
- -
- args:
- {{ vm_params(image_name,flavor_name,1) }}
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_and_list_snapshots:
- -
- args:
- detailed: true
- force: false
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {{ volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_and_list_volume:
- -
- args:
- detailed: true
- {{ vm_params(image_name,none,1) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- detailed: true
- size: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_and_upload_volume_to_image:
- -
- args:
- container_format: "bare"
- disk_format: "raw"
- do_delete: true
- force: false
- size: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_nested_snapshots_and_attach_volume:
- -
- args:
- nested_level: 1
- size:
- max: 1
- min: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- servers:
- {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
- servers_per_tenant: 1
- auto_assign_nic: true
- network: {}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_snapshot_and_attach_volume:
- -
- args:
- volume_type: false
- size:
- min: 1
- max: 5
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- servers:
- {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
- servers_per_tenant: 2
- auto_assign_nic: true
- network: {}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- volume_type: true
- size:
- min: 1
- max: 5
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- servers:
- {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
- servers_per_tenant: 2
- auto_assign_nic: true
- network: {}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_volume:
- -
- args:
- size: 1
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- -
- args:
- size:
- min: 1
- max: 5
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.list_volumes:
- -
- args:
- detailed: True
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- volumes:
- size: 1
- volumes_per_tenant: 4
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_and_delete_snapshot:
- -
- args:
- force: false
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {{ volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_and_delete_volume:
- -
- args:
- size:
- max: 1
- min: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- {{ vm_params(image_name,none,1) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- size: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_and_extend_volume:
- -
- args:
- new_size: 2
- size: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_from_volume_and_delete_volume:
- -
- args:
- size: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {{ volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
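The scenario files in this tree are not plain YAML: calls such as {{ user_context(...) }}, {{ constant_runner(...) }} and {{ no_failures_sla() }} are Jinja2 macros, defined elsewhere in the rally test tree and expanded before Rally parses the task. A sketch of the mechanism; the constant_runner body below is an assumed stand-in for illustration, only the expansion step is the point:

# Sketch of the Jinja2 expansion step; this macro body is an assumption,
# not the project's actual definition.
from jinja2 import Template

doc = Template(
    "{% macro constant_runner(concurrency=1, times=1, is_smoke=True) %}"
    "type: \"constant\"\n"
    "concurrency: {{ 1 if is_smoke else concurrency }}\n"
    "times: {{ 1 if is_smoke else times }}"
    "{% endmacro %}"
    "runner:\n"
    "  {{ constant_runner(concurrency=4, times=10, is_smoke=False)|indent(2) }}\n")
print(doc.render())
# runner:
#   type: "constant"
#   concurrency: 4
#   times: 10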
diff --git a/testcases/OpenStack/rally/scenario/full/opnfv-heat.yaml b/testcases/OpenStack/rally/scenario/full/opnfv-heat.yaml
deleted file mode 100644
index 6f3a5c163..000000000
--- a/testcases/OpenStack/rally/scenario/full/opnfv-heat.yaml
+++ /dev/null
@@ -1,140 +0,0 @@
- HeatStacks.create_and_delete_stack:
- -
- args:
- template_path: "{{ tmpl_dir }}/default.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- template_path: "{{ tmpl_dir }}/server_with_ports.yaml.template"
- parameters:
- public_net: {{ floating_network }}
- image: {{ image_name }}
- flavor: {{ flavor_name }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- template_path: "{{ tmpl_dir }}/server_with_volume.yaml.template"
- parameters:
- image: {{ image_name }}
- flavor: {{ flavor_name }}
- network_id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- HeatStacks.create_and_list_stack:
- -
- args:
- template_path: "{{ tmpl_dir }}/default.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- HeatStacks.create_update_delete_stack:
- -
- args:
- template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
- updated_template_path: "{{ tmpl_dir }}/updated_random_strings_add.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
- updated_template_path: "{{ tmpl_dir }}/updated_random_strings_delete.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- template_path: "{{ tmpl_dir }}/resource_group.yaml.template"
- updated_template_path: "{{ tmpl_dir }}/updated_resource_group_increase.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template"
- updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- template_path: "{{ tmpl_dir }}/resource_group.yaml.template"
- updated_template_path: "{{ tmpl_dir }}/updated_resource_group_reduce.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
- updated_template_path: "{{ tmpl_dir }}/updated_random_strings_replace.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- HeatStacks.create_check_delete_stack:
- -
- args:
- template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- HeatStacks.create_suspend_resume_delete_stack:
- -
- args:
- template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- HeatStacks.list_stacks_and_resources:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
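Free variables such as {{ tmpl_dir }}, {{ image_name }} and {{ floating_network }} are bound when the task is launched. A hedged sketch of the generic Rally invocation that supplies them; every value here is an illustrative placeholder, and the harness additionally injects the macro definitions before handing the task to Rally:

# Hedged sketch of launching a scenario file with Rally's task templating;
# all values are placeholders, not the harness defaults.
import json
import subprocess

task_args = {
    "tmpl_dir": "scenario/templates",
    "sup_dir": "scenario/support",
    "image_name": "functest-img-rally",
    "flavor_name": "m1.tiny",
    "floating_network": "ext-net",
    "netid": "11111111-2222-3333-4444-555555555555",
    "tenants_amount": 1,
    "users_amount": 1,
    "use_existing_users": False,
    "concurrency": 1,
    "iterations": 10,
    "smoke": True,
}
subprocess.check_call(["rally", "task", "start",
                       "--task", "opnfv-heat.yaml",
                       "--task-args", json.dumps(task_args)])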
diff --git a/testcases/OpenStack/rally/scenario/full/opnfv-neutron.yaml b/testcases/OpenStack/rally/scenario/full/opnfv-neutron.yaml
deleted file mode 100644
index 0a773533a..000000000
--- a/testcases/OpenStack/rally/scenario/full/opnfv-neutron.yaml
+++ /dev/null
@@ -1,239 +0,0 @@
- NeutronNetworks.create_and_update_networks:
- -
- args:
- network_create_args: {}
- network_update_args:
- admin_state_up: false
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- neutron:
- network: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_update_ports:
- -
- args:
- network_create_args: {}
- port_create_args: {}
- port_update_args:
- admin_state_up: false
- device_id: "dummy_id"
- device_owner: "dummy_owner"
- ports_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- port: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_update_routers:
- -
- args:
- network_create_args: {}
- router_create_args: {}
- router_update_args:
- admin_state_up: false
- subnet_cidr_start: "1.1.0.0/30"
- subnet_create_args: {}
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- port: -1
- router: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_update_subnets:
- -
- args:
- network_create_args: {}
- subnet_cidr_start: "1.4.0.0/16"
- subnet_create_args: {}
- subnet_update_args:
- enable_dhcp: false
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_delete_networks:
- -
- args:
- network_create_args: {}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- neutron:
- network: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_delete_ports:
- -
- args:
- network_create_args: {}
- port_create_args: {}
- ports_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- port: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_delete_routers:
- -
- args:
- network_create_args: {}
- router_create_args: {}
- subnet_cidr_start: "1.1.0.0/30"
- subnet_create_args: {}
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- port: -1
- router: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_delete_subnets:
- -
- args:
- network_create_args: {}
- subnet_cidr_start: "1.1.0.0/30"
- subnet_create_args: {}
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_list_networks:
- -
- args:
- network_create_args: {}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- neutron:
- network: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_list_ports:
- -
- args:
- network_create_args: {}
- port_create_args: {}
- ports_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- port: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_list_routers:
- -
- args:
- network_create_args: {}
- router_create_args: {}
- subnet_cidr_start: "1.1.0.0/30"
- subnet_create_args: {}
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- router: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_list_subnets:
- -
- args:
- network_create_args: {}
- subnet_cidr_start: "1.1.0.0/30"
- subnet_create_args: {}
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/full/opnfv-nova.yaml b/testcases/OpenStack/rally/scenario/full/opnfv-nova.yaml
deleted file mode 100644
index d7622093d..000000000
--- a/testcases/OpenStack/rally/scenario/full/opnfv-nova.yaml
+++ /dev/null
@@ -1,369 +0,0 @@
- NovaKeypair.create_and_delete_keypair:
- -
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_nova(keypairs=true) }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaKeypair.create_and_list_keypairs:
- -
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_nova(keypairs=true) }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_and_bounce_server:
- -
- args:
- actions:
- -
- hard_reboot: 1
- -
- soft_reboot: 1
- -
- stop_start: 1
- -
- rescue_unrescue: 1
- {{ vm_params(image_name, flavor_name) }}
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_neutron() }}
- {{ unlimited_nova() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_and_delete_server:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_neutron() }}
- {{ unlimited_nova() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_and_list_server:
- -
- args:
- detailed: true
- {{ vm_params(image_name, flavor_name) }}
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_neutron() }}
- {{ unlimited_nova() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_and_rebuild_server:
- -
- args:
- {{ vm_params(flavor=flavor_name) }}
- from_image:
- name: {{ image_name }}
- to_image:
- name: {{ image_name }}
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_neutron() }}
- {{ unlimited_nova() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.snapshot_server:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_neutron() }}
- {{ unlimited_nova() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_server_from_volume:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- volume_size: 10
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_server:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaSecGroup.create_and_delete_secgroups:
- -
- args:
- security_group_count: 10
- rules_per_security_group: 10
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_neutron(secgroups=true) }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaSecGroup.create_and_list_secgroups:
- -
- args:
- security_group_count: 10
- rules_per_security_group: 10
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_neutron(secgroups=true) }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.list_servers:
- -
- args:
- detailed: True
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- servers:
- {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
- servers_per_tenant: 2
- auto_assign_nic: true
- network: {}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.resize_server:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- to_flavor:
- name: "m1.small"
- confirm: true
- force_delete: false
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_and_live_migrate_server:
- - args:
- {{ vm_params(image_name, flavor_name) }}
- block_migration: false
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_server_attach_created_volume_and_live_migrate:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- size: 10
- block_migration: false
- boot_server_kwargs:
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_server_from_volume_and_live_migrate:
- - args:
- {{ vm_params(image_name, flavor_name) }}
- block_migration: false
- volume_size: 10
- force_delete: false
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaKeypair.boot_and_delete_server_with_keypair:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- server_kwargs:
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_neutron() }}
- {{ unlimited_nova(keypairs=true) }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_server_from_volume_and_delete:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- volume_size: 5
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_volumes() }}
- {{ unlimited_neutron() }}
- {{ unlimited_nova() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.pause_and_unpause_server:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- force_delete: false
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_neutron() }}
- {{ unlimited_nova() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaSecGroup.boot_and_delete_server_with_secgroups:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- security_group_count: 10
- rules_per_security_group: 10
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_nova() }}
- {{ unlimited_neutron(secgroups=true) }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_and_migrate_server:
- - args:
- {{ vm_params(image_name, flavor_name) }}
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/opnfv-authenticate.yaml b/testcases/OpenStack/rally/scenario/opnfv-authenticate.yaml
deleted file mode 100644
index a04e4c1c1..000000000
--- a/testcases/OpenStack/rally/scenario/opnfv-authenticate.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
- Authenticate.keystone:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- Authenticate.validate_cinder:
- -
- args:
- repetitions: 2
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- Authenticate.validate_glance:
- -
- args:
- repetitions: 2
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- Authenticate.validate_heat:
- -
- args:
- repetitions: 2
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- Authenticate.validate_neutron:
- -
- args:
- repetitions: 2
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- Authenticate.validate_nova:
- -
- args:
- repetitions: 2
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/opnfv-glance.yaml b/testcases/OpenStack/rally/scenario/opnfv-glance.yaml
deleted file mode 100644
index 3a67e7457..000000000
--- a/testcases/OpenStack/rally/scenario/opnfv-glance.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
- GlanceImages.create_and_delete_image:
- -
- args:
- {{ glance_args(location=glance_image_location, type=glance_image_format) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- GlanceImages.create_and_list_image:
- -
- args:
- {{ glance_args(location=glance_image_location, type=glance_image_format) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- GlanceImages.list_images:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- GlanceImages.create_image_and_boot_instances:
- -
- args:
- {{ glance_args(location=glance_image_location, type=glance_image_format) }}
- flavor:
- name: {{ flavor_name }}
- number_instances: 2
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- quotas:
- {{ unlimited_nova() }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
diff --git a/testcases/OpenStack/rally/scenario/opnfv-keystone.yaml b/testcases/OpenStack/rally/scenario/opnfv-keystone.yaml
deleted file mode 100644
index bfc9948b3..000000000
--- a/testcases/OpenStack/rally/scenario/opnfv-keystone.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
- KeystoneBasic.add_and_remove_user_role:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.create_add_and_list_user_roles:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.create_and_list_tenants:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.create_and_delete_role:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.create_and_delete_service:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.get_entities:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.create_update_and_delete_tenant:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.create_user:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.create_tenant:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.create_and_list_users:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- KeystoneBasic.create_tenant_with_users:
- -
- args:
- users_per_tenant: 10
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/opnfv-quotas.yaml b/testcases/OpenStack/rally/scenario/opnfv-quotas.yaml
deleted file mode 100644
index a0682acce..000000000
--- a/testcases/OpenStack/rally/scenario/opnfv-quotas.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
- Quotas.cinder_update_and_delete:
- -
- args:
- max_quota: 1024
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- Quotas.cinder_update:
- -
- args:
- max_quota: 1024
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- Quotas.neutron_update:
- -
- args:
- max_quota: 1024
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- Quotas.nova_update_and_delete:
- -
- args:
- max_quota: 1024
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- Quotas.nova_update:
- -
- args:
- max_quota: 1024
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/opnfv-requests.yaml b/testcases/OpenStack/rally/scenario/opnfv-requests.yaml
deleted file mode 100644
index 161369786..000000000
--- a/testcases/OpenStack/rally/scenario/opnfv-requests.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
- HttpRequests.check_request:
- -
- args:
- url: "{{ request_url }}"
- method: "GET"
- status_code: 200
- allow_redirects: True
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
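The single scenario above reduces to one unauthenticated HTTP check. A rough stand-alone equivalent; the URL stands in for the rendered {{ request_url }}:

# Rough stand-alone equivalent of HttpRequests.check_request; the URL
# below is a placeholder for the rendered {{ request_url }} value.
import requests

def check_request(url, method="GET", status_code=200, allow_redirects=True):
    response = requests.request(method, url, allow_redirects=allow_redirects)
    if response.status_code != status_code:
        raise AssertionError("expected %d, got %d"
                             % (status_code, response.status_code))
    return response

# check_request("http://www.example.com/")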
diff --git a/testcases/OpenStack/rally/scenario/opnfv-vm.yaml b/testcases/OpenStack/rally/scenario/opnfv-vm.yaml
deleted file mode 100644
index 74f509925..000000000
--- a/testcases/OpenStack/rally/scenario/opnfv-vm.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
- VMTasks.boot_runcommand_delete:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- floating_network: {{ floating_network }}
- force_delete: false
- command:
- interpreter: /bin/sh
- script_file: {{ sup_dir }}/instance_dd_test.sh
- username: cirros
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- fixed_network: private
- floating_network: {{ floating_network }}
- force_delete: false
- command:
- interpreter: /bin/sh
- script_file: {{ sup_dir }}/instance_dd_test.sh
- use_floatingip: true
- username: cirros
- nics:
- - net-id: {{ netid }}
- volume_args:
- size: 2
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/sanity/opnfv-cinder.yaml b/testcases/OpenStack/rally/scenario/sanity/opnfv-cinder.yaml
deleted file mode 100644
index 5962b1db5..000000000
--- a/testcases/OpenStack/rally/scenario/sanity/opnfv-cinder.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
- CinderVolumes.create_and_delete_snapshot:
- -
- args:
- force: false
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {{ volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_and_delete_volume:
- -
- args:
- size:
- max: 1
- min: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- {{ vm_params(image_name,none,1) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
- -
- args:
- size: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_and_extend_volume:
- -
- args:
- new_size: 2
- size: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumes.create_from_volume_and_delete_volume:
- -
- args:
- size: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- {{ unlimited_volumes() }}
- {{ volumes() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/sanity/opnfv-heat.yaml b/testcases/OpenStack/rally/scenario/sanity/opnfv-heat.yaml
deleted file mode 100644
index dc34cc3f2..000000000
--- a/testcases/OpenStack/rally/scenario/sanity/opnfv-heat.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
- HeatStacks.create_update_delete_stack:
- -
- args:
- template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template"
- updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- HeatStacks.create_check_delete_stack:
- -
- args:
- template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- HeatStacks.create_suspend_resume_delete_stack:
- -
- args:
- template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- HeatStacks.list_stacks_and_resources:
- -
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/sanity/opnfv-neutron.yaml b/testcases/OpenStack/rally/scenario/sanity/opnfv-neutron.yaml
deleted file mode 100644
index 159f2b633..000000000
--- a/testcases/OpenStack/rally/scenario/sanity/opnfv-neutron.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
- NeutronNetworks.create_and_delete_networks:
- -
- args:
- network_create_args: {}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- neutron:
- network: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_delete_ports:
- -
- args:
- network_create_args: {}
- port_create_args: {}
- ports_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- port: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_delete_routers:
- -
- args:
- network_create_args: {}
- router_create_args: {}
- subnet_cidr_start: "1.1.0.0/30"
- subnet_create_args: {}
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- port: -1
- router: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_delete_subnets:
- -
- args:
- network_create_args: {}
- subnet_cidr_start: "1.1.0.0/30"
- subnet_create_args: {}
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_list_networks:
- -
- args:
- network_create_args: {}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- quotas:
- neutron:
- network: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_list_ports:
- -
- args:
- network_create_args: {}
- port_create_args: {}
- ports_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- port: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_list_routers:
- -
- args:
- network_create_args: {}
- router_create_args: {}
- subnet_cidr_start: "1.1.0.0/30"
- subnet_create_args: {}
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- router: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NeutronNetworks.create_and_list_subnets:
- -
- args:
- network_create_args: {}
- subnet_cidr_start: "1.1.0.0/30"
- subnet_create_args: {}
- subnets_per_network: 1
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- quotas:
- neutron:
- network: -1
- subnet: -1
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/sanity/opnfv-nova.yaml b/testcases/OpenStack/rally/scenario/sanity/opnfv-nova.yaml
deleted file mode 100644
index e2795cf71..000000000
--- a/testcases/OpenStack/rally/scenario/sanity/opnfv-nova.yaml
+++ /dev/null
@@ -1,140 +0,0 @@
- NovaServers.boot_and_live_migrate_server:
- - args:
- {{ vm_params(image_name, flavor_name) }}
- block_migration: false
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_server_attach_created_volume_and_live_migrate:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- size: 10
- block_migration: false
- boot_server_kwargs:
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_server_from_volume_and_live_migrate:
- - args:
- {{ vm_params(image_name, flavor_name) }}
- block_migration: false
- volume_size: 10
- force_delete: false
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaKeypair.boot_and_delete_server_with_keypair:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- server_kwargs:
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_neutron() }}
- {{ unlimited_nova(keypairs=true) }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_server_from_volume_and_delete:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- volume_size: 5
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_volumes() }}
- {{ unlimited_neutron() }}
- {{ unlimited_nova() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.pause_and_unpause_server:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- force_delete: false
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_neutron() }}
- {{ unlimited_nova() }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaSecGroup.boot_and_delete_server_with_secgroups:
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- security_group_count: 10
- rules_per_security_group: 10
- nics:
- - net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- start_cidr: "100.1.0.0/25"
- quotas:
- {{ unlimited_nova() }}
- {{ unlimited_neutron(secgroups=true) }}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- NovaServers.boot_and_migrate_server:
- - args:
- {{ vm_params(image_name, flavor_name) }}
- nics:
- - net-id: {{ netid }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/testcases/OpenStack/rally/scenario/support/instance_dd_test.sh b/testcases/OpenStack/rally/scenario/support/instance_dd_test.sh
deleted file mode 100755
index e3bf23405..000000000
--- a/testcases/OpenStack/rally/scenario/support/instance_dd_test.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }
-file=/tmp/test.img
-c=${1:-$SIZE}
-c=${c:-1000} #default is 1GB
-write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
-read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c")
-[ -f $file ] && rm $file
-
-echo "{
- \"write_seq_${c}m\": $write_seq,
- \"read_seq_${c}m\": $read_seq
- }"
diff --git a/testcases/OpenStack/rally/scenario/templates/autoscaling_policy.yaml.template b/testcases/OpenStack/rally/scenario/templates/autoscaling_policy.yaml.template
deleted file mode 100644
index a22487e33..000000000
--- a/testcases/OpenStack/rally/scenario/templates/autoscaling_policy.yaml.template
+++ /dev/null
@@ -1,17 +0,0 @@
-heat_template_version: 2013-05-23
-
-resources:
- test_group:
- type: OS::Heat::AutoScalingGroup
- properties:
- desired_capacity: 0
- max_size: 0
- min_size: 0
- resource:
- type: OS::Heat::RandomString
- test_policy:
- type: OS::Heat::ScalingPolicy
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: { get_resource: test_group }
- scaling_adjustment: 1
\ No newline at end of file
diff --git a/testcases/OpenStack/rally/scenario/templates/default.yaml.template b/testcases/OpenStack/rally/scenario/templates/default.yaml.template
deleted file mode 100644
index eb4f2f2dd..000000000
--- a/testcases/OpenStack/rally/scenario/templates/default.yaml.template
+++ /dev/null
@@ -1 +0,0 @@
-heat_template_version: 2014-10-16
\ No newline at end of file
diff --git a/testcases/OpenStack/rally/scenario/templates/random_strings.yaml.template b/testcases/OpenStack/rally/scenario/templates/random_strings.yaml.template
deleted file mode 100644
index 2dd676c11..000000000
--- a/testcases/OpenStack/rally/scenario/templates/random_strings.yaml.template
+++ /dev/null
@@ -1,13 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: Test template for rally create-update-delete scenario
-
-resources:
- test_string_one:
- type: OS::Heat::RandomString
- properties:
- length: 20
- test_string_two:
- type: OS::Heat::RandomString
- properties:
- length: 20
\ No newline at end of file
diff --git a/testcases/OpenStack/rally/scenario/templates/resource_group.yaml.template b/testcases/OpenStack/rally/scenario/templates/resource_group.yaml.template
deleted file mode 100644
index b3f505fa6..000000000
--- a/testcases/OpenStack/rally/scenario/templates/resource_group.yaml.template
+++ /dev/null
@@ -1,13 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: Test template for rally create-update-delete scenario
-
-resources:
- test_group:
- type: OS::Heat::ResourceGroup
- properties:
- count: 2
- resource_def:
- type: OS::Heat::RandomString
- properties:
- length: 20
\ No newline at end of file
diff --git a/testcases/OpenStack/rally/scenario/templates/server_with_ports.yaml.template b/testcases/OpenStack/rally/scenario/templates/server_with_ports.yaml.template
deleted file mode 100644
index 909f45d21..000000000
--- a/testcases/OpenStack/rally/scenario/templates/server_with_ports.yaml.template
+++ /dev/null
@@ -1,64 +0,0 @@
-heat_template_version: 2013-05-23
-
-parameters:
- # set all correct defaults for parameters before launch test
- public_net:
- type: string
- default: public
- image:
- type: string
- default: cirros-0.3.4-x86_64-uec
- flavor:
- type: string
- default: m1.tiny
- cidr:
- type: string
- default: 11.11.11.0/24
-
-resources:
- server:
- type: OS::Nova::Server
- properties:
- image: {get_param: image}
- flavor: {get_param: flavor}
- networks:
- - port: { get_resource: server_port }
-
- router:
- type: OS::Neutron::Router
- properties:
- external_gateway_info:
- network: {get_param: public_net}
-
- router_interface:
- type: OS::Neutron::RouterInterface
- properties:
- router_id: { get_resource: router }
- subnet_id: { get_resource: private_subnet }
-
- private_net:
- type: OS::Neutron::Net
-
- private_subnet:
- type: OS::Neutron::Subnet
- properties:
- network: { get_resource: private_net }
- cidr: {get_param: cidr}
-
- port_security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- name: default_port_security_group
- description: >
- Default security group assigned to the port. The neutron default group is
- not used because neutron creates several groups with the same name=default
- and nova cannot choose which one it should use.
-
- server_port:
- type: OS::Neutron::Port
- properties:
- network: {get_resource: private_net}
- fixed_ips:
- - subnet: { get_resource: private_subnet }
- security_groups:
- - { get_resource: port_security_group }
diff --git a/testcases/OpenStack/rally/scenario/templates/server_with_volume.yaml.template b/testcases/OpenStack/rally/scenario/templates/server_with_volume.yaml.template
deleted file mode 100644
index 826ca9dae..000000000
--- a/testcases/OpenStack/rally/scenario/templates/server_with_volume.yaml.template
+++ /dev/null
@@ -1,43 +0,0 @@
-heat_template_version: 2013-05-23
-
-parameters:
- # set all correct defaults for parameters before launch test
- image:
- type: string
- default: cirros-0.3.4-x86_64-uec
- flavor:
- type: string
- default: m1.tiny
- availability_zone:
- type: string
- description: The Availability Zone to launch the instance.
- default: nova
- volume_size:
- type: number
- description: Size of the volume to be created.
- default: 1
- constraints:
- - range: { min: 1, max: 1024 }
- description: must be between 1 and 1024 GB.
- network_id:
- type: string
-
-resources:
- server:
- type: OS::Nova::Server
- properties:
- image: {get_param: image}
- flavor: {get_param: flavor}
- networks:
- - network: { get_param: network_id }
- cinder_volume:
- type: OS::Cinder::Volume
- properties:
- size: { get_param: volume_size }
- availability_zone: { get_param: availability_zone }
- volume_attachment:
- type: OS::Cinder::VolumeAttachment
- properties:
- volume_id: { get_resource: cinder_volume }
- instance_uuid: { get_resource: server}
- mountpoint: /dev/vdc
diff --git a/testcases/OpenStack/rally/scenario/templates/updated_autoscaling_policy_inplace.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_autoscaling_policy_inplace.yaml.template
deleted file mode 100644
index cf34879ca..000000000
--- a/testcases/OpenStack/rally/scenario/templates/updated_autoscaling_policy_inplace.yaml.template
+++ /dev/null
@@ -1,23 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- Test template for create-update-delete-stack scenario in rally.
- The template updates resource parameters without resource re-creation
- (replacement) in the stack defined by autoscaling_policy.yaml.template.
- It allows measuring the performance of a "pure" resource update operation only.
-
-resources:
- test_group:
- type: OS::Heat::AutoScalingGroup
- properties:
- desired_capacity: 0
- max_size: 0
- min_size: 0
- resource:
- type: OS::Heat::RandomString
- test_policy:
- type: OS::Heat::ScalingPolicy
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: { get_resource: test_group }
- scaling_adjustment: -1
\ No newline at end of file
diff --git a/testcases/OpenStack/rally/scenario/templates/updated_random_strings_add.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_random_strings_add.yaml.template
deleted file mode 100644
index e06d42e01..000000000
--- a/testcases/OpenStack/rally/scenario/templates/updated_random_strings_add.yaml.template
+++ /dev/null
@@ -1,19 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Test template for create-update-delete-stack scenario in rally.
- The template updates the stack defined by random_strings.yaml.template with an additional resource.
-
-resources:
- test_string_one:
- type: OS::Heat::RandomString
- properties:
- length: 20
- test_string_two:
- type: OS::Heat::RandomString
- properties:
- length: 20
- test_string_three:
- type: OS::Heat::RandomString
- properties:
- length: 20 \ No newline at end of file
diff --git a/testcases/OpenStack/rally/scenario/templates/updated_random_strings_delete.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_random_strings_delete.yaml.template
deleted file mode 100644
index d02593e3b..000000000
--- a/testcases/OpenStack/rally/scenario/templates/updated_random_strings_delete.yaml.template
+++ /dev/null
@@ -1,11 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Test template for create-update-delete-stack scenario in rally.
- The template deletes one resource from the stack defined by random_strings.yaml.template.
-
-resources:
- test_string_one:
- type: OS::Heat::RandomString
- properties:
- length: 20 \ No newline at end of file
diff --git a/testcases/OpenStack/rally/scenario/templates/updated_random_strings_replace.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_random_strings_replace.yaml.template
deleted file mode 100644
index 46d8bff4c..000000000
--- a/testcases/OpenStack/rally/scenario/templates/updated_random_strings_replace.yaml.template
+++ /dev/null
@@ -1,19 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Test template for create-update-delete-stack scenario in rally.
- The template deletes one resource from the stack defined by
- random_strings.yaml.template and re-creates it with the updated parameters
- (so-called update-replace). That happens because some parameters cannot be
-  changed without resource re-creation. The template allows measuring the
-  performance of the update-replace operation.
-
-resources:
- test_string_one:
- type: OS::Heat::RandomString
- properties:
- length: 20
- test_string_two:
- type: OS::Heat::RandomString
- properties:
- length: 40 \ No newline at end of file
diff --git a/testcases/OpenStack/rally/scenario/templates/updated_resource_group_increase.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_resource_group_increase.yaml.template
deleted file mode 100644
index 891074ebc..000000000
--- a/testcases/OpenStack/rally/scenario/templates/updated_resource_group_increase.yaml.template
+++ /dev/null
@@ -1,16 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Test template for create-update-delete-stack scenario in rally.
- The template updates one resource from the stack defined by resource_group.yaml.template
-  and adds child resources to it.
-
-resources:
- test_group:
- type: OS::Heat::ResourceGroup
- properties:
- count: 3
- resource_def:
- type: OS::Heat::RandomString
- properties:
- length: 20 \ No newline at end of file
diff --git a/testcases/OpenStack/rally/scenario/templates/updated_resource_group_reduce.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_resource_group_reduce.yaml.template
deleted file mode 100644
index b4d1d1730..000000000
--- a/testcases/OpenStack/rally/scenario/templates/updated_resource_group_reduce.yaml.template
+++ /dev/null
@@ -1,16 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Test template for create-update-delete-stack scenario in rally.
- The template updates one resource from the stack defined by resource_group.yaml.template
-  and deletes child resources from it.
-
-resources:
- test_group:
- type: OS::Heat::ResourceGroup
- properties:
- count: 1
- resource_def:
- type: OS::Heat::RandomString
- properties:
- length: 20 \ No newline at end of file
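
All of these updated_* templates are consumed in pairs with their base templates by Rally's HeatStacks.create_update_delete_stack scenario. A sketch of such a task entry, written as a Python dict for brevity (the runner values are illustrative):

import json

task_entry = {
    "HeatStacks.create_update_delete_stack": [{
        "args": {
            "template_path": "templates/resource_group.yaml.template",
            "updated_template_path":
                "templates/updated_resource_group_reduce.yaml.template",
        },
        "runner": {"type": "constant", "times": 1, "concurrency": 1},
    }]
}
print(json.dumps(task_entry, indent=2))
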
diff --git a/testcases/OpenStack/rally/task.yaml b/testcases/OpenStack/rally/task.yaml
deleted file mode 100644
index c482f120d..000000000
--- a/testcases/OpenStack/rally/task.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-{%- if smoke %}
-{%- set users_amount = 1 %}
-{%- set tenants_amount = 1 %}
-{%- endif %}
-
-{%- from "macro/macro.yaml" import user_context, vm_params, unlimited_volumes, constant_runner, rps_runner, no_failures_sla -%}
-{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args -%}
-
----
-{% if "authenticate" in service_list %}
-{%- include "var/opnfv-authenticate.yaml"-%}
-{% endif %}
-
-{% if "cinder" in service_list %}
-{%- include "var/opnfv-cinder.yaml"-%}
-{% endif %}
-
-{% if "keystone" in service_list %}
-{%- include "var/opnfv-keystone.yaml"-%}
-{% endif %}
-
-{% if "nova" in service_list %}
-{%- include "var/opnfv-nova.yaml"-%}
-{% endif %}
-
-{% if "glance" in service_list %}
-{%- include "var/opnfv-glance.yaml"-%}
-{% endif %}
-
-{% if "neutron" in service_list %}
-{%- include "var/opnfv-neutron.yaml"-%}
-{% endif %}
-
-{% if "quotas" in service_list %}
-{%- include "var/opnfv-quotas.yaml"-%}
-{% endif %}
-
-{% if "requests" in service_list %}
-{%- include "var/opnfv-requests.yaml"-%}
-{% endif %}
-
-{% if "heat" in service_list %}
-{%- include "var/opnfv-heat.yaml"-%}
-{% endif %}
-
-{% if "vm" in service_list %}
-{%- include "var/opnfv-vm.yaml"-%}
-{% endif %}
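
task.yaml is a Jinja2 template, not plain YAML: the smoke and service_list variables select which scenario files are pulled in. A sketch of rendering it outside Rally, assuming the var/ directory has been populated with the opnfv-*.yaml scenario files beforehand:

import jinja2

env = jinja2.Environment(
    loader=jinja2.FileSystemLoader('testcases/OpenStack/rally'))
task = env.get_template('task.yaml').render(
    smoke=True, service_list=['authenticate', 'nova'])
print(task)
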
diff --git a/testcases/OpenStack/tempest/custom_tests/blacklist.txt b/testcases/OpenStack/tempest/custom_tests/blacklist.txt
deleted file mode 100644
index 5c8581f66..000000000
--- a/testcases/OpenStack/tempest/custom_tests/blacklist.txt
+++ /dev/null
@@ -1,96 +0,0 @@
--
- scenarios:
- - os-odl_l2-bgpvpn-ha
- - os-odl_l2-bgpvpn-noha
- installers:
- - fuel
- - apex
- tests:
- - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers
- - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details
- - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers
- - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details
- - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard
- - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_list_show_update_delete_floating_ip
- - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_floating_ip_specifying_a_fixed_ip_address
- - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
- - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern
-
--
- scenarios:
- - os-odl_l2-nofeature-ha
- - os-odl_l2-nofeature-noha
- - os-nosdn-nofeature-ha
- - os-nosdn-nofeature-noha
- installers:
- - joid
- tests:
- - tempest.api.object_storage
-
--
- scenarios:
- - os-nosdn-lxd-ha
- - os-nosdn-lxd-noha
- installers:
- - joid
- tests:
- - tempest.api.object_storage
- - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
- - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern
-
--
- scenarios:
- - os-onos-nofeature-ha
- - os-onos-nofeature-noha
- - os-onos-sfc-ha
- - os-onos-sfc-noha
- installers:
- - fuel
- - apex
- - compass
- tests:
- - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard
- - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
- - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern
-
--
- scenarios:
- - os-onos-nofeature-ha
- - os-onos-nofeature-noha
- - os-onos-sfc-ha
- - os-onos-sfc-noha
- installers:
- - joid
- tests:
- - tempest.api.object_storage
- - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard
- - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
- - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern
-
--
- # https://bugs.launchpad.net/tempest/+bug/1586931
- scenarios:
- - os-odl_l2-nofeature-ha
- - os-odl_l2-nofeature-noha
- - os-odl_l2-sfc-ha
- - os-odl_l2-sfc-noha
- - os-odl_l3-nofeature-ha
- - os-odl_l3-nofeature-noha
- - os-nosdn-kvm-ha
- - os-nosdn-kvm-noha
- - os-nosdn-nofeature-ha
- - os-nosdn-nofeature-noha
- - os-nosdn-ovs-ha
- - os-nosdn-ovs-noha
- installers:
- - fuel
- tests:
- - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
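
The blacklist is plain YAML: a list of entries, each pairing scenarios and installers with the tests to skip. The same selection logic as apply_tempest_blacklist() in run_tempest.py further down, reduced to a sketch:

import yaml

with open('blacklist.txt') as f:
    black_list = yaml.safe_load(f)

def blacklisted_tests(deploy_scenario, installer_type):
    # Return the tests of the first entry matching both keys, as the
    # original loop does.
    for item in black_list:
        if (deploy_scenario in item['scenarios'] and
                installer_type in item['installers']):
            return item['tests']
    return []

print(blacklisted_tests('os-nosdn-lxd-ha', 'joid'))
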
diff --git a/testcases/OpenStack/tempest/custom_tests/defcore_req.txt b/testcases/OpenStack/tempest/custom_tests/defcore_req.txt
deleted file mode 100644
index bb1d172df..000000000
--- a/testcases/OpenStack/tempest/custom_tests/defcore_req.txt
+++ /dev/null
@@ -1,122 +0,0 @@
-# Set of DefCore tempest test cases (see http://www.openstack.org/brand/interop)
-# This approved version (2016.01) is valid for Juno, Kilo, and Liberty releases of OpenStack
-# The list is stored at http://git.openstack.org/cgit/openstack/defcore/plain/2016.01/2016.01.required.txt
-tempest.api.compute.images.test_images.ImagesTestJSON.test_delete_saving_image[id-aa06b52b-2db5-4807-b218-9441f75d74e3]
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314]
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_changes_since[id-18bac3ae-da27-436c-92a9-b22474d13aab]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_name[id-33163b73-79f5-4d07-a7ea-9213bcc468ff]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_id[id-9f238683-c763-45aa-b848-232ec3ce3105]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_ref[id-05a377b8-28cf-4734-a1e6-2ab5c38bf606]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_status[id-a3f5b513-aeb3-42a9-b18e-f091ef73254d]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_type[id-e3356918-4d3e-4756-81d5-abc4524ba29f]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_limit_results[id-3a484ca9-67ba-451e-b494-7fcf28d32d62]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_changes_since[id-7d439e18-ac2e-4827-b049-7e18004712c4]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_name[id-644ea267-9bd9-4f3b-af9f-dffa02396a17]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_server_ref[id-8c78f822-203b-4bf6-8bba-56ebd551cf84]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_status[id-9b0ea018-6185-4f71-948a-a123a107988e]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_type[id-888c0cc0-7223-43c5-9db0-b125fd0a393b]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_limit_results[id-ba2fa9a9-b672-47cc-b354-3b4c0600e2cb]
-tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image[id-490d0898-e12a-463f-aef0-c50156b9f789]
-tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images[id-fd51b7f4-d4a3-4331-9885-866658112a6f]
-tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail[id-9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c]
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image[id-b3304c3b-97df-46d2-8cd3-e2b6659724e7]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip[id-43a1242e-7b31-48d1-88f2-3f72aa9f2077]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip_regex[id-a905e287-c35e-42f2-b132-d02b09f3654a]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits[id-12c80a9f-2dec-480e-882b-98ba15757659]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f,negative]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32,smoke]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm[id-1499262a-9328-4eda-9068-db1ac57498d2]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert[id-c03aab19-adb1-44f5-917d-c419577e9e68]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata[id-479da087-92b3-4dcf-aeb3-fd293b2d14ce]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata[id-211021f6-21de-4657-a68f-908878cfe251]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item[id-58c02d4f-5c67-40be-8744-d3fa5982eb1c]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata[id-344d981e-0c33-4997-8a5d-6c1d803e4134]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password[id-b92d5ec7-b1dd-44a2-87e4-45e888c46ef0]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair[id-f9e15296-d7f9-4e62-b53f-a04e89160833]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_non_existent_flavor[id-ced1a1d7-2ab6-45c9-b90f-b27d87b30efd,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_null_flavor[id-45436a7d-a388-4a35-a9d8-3adc5d0d940b,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_delete_a_server_of_another_tenant[id-5c75009d-3eea-423e-bea3-61b09fd25f9c,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_update_server_of_another_tenant[id-543d84c1-dd2e-4c6d-8cb2-b9da0efaa384,negative]
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a]
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107]
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff]
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513]
-tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list[id-bc2dd1a0-15af-48e5-9990-f2e75a48325d]
-tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_with_details[id-bad0567a-5a4f-420b-851e-780b55bb867c]
-tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_invalid_volume_id[id-f01904f2-e975-4915-98ce-cb5fa27bde4f,negative]
-tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id[id-62bab09a-4c03-4617-8cca-8572bc94af9b,negative]
-tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token[id-6f8e4436-fc96-4282-8122-e41df57197a9]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v1.test_images.ListImagesTest.test_index_no_params[id-246178ab-3b33-4212-9a4b-a7fe8261794d]
-tempest.api.object_storage.test_object_expiry.ObjectExpiryTest.test_get_object_after_expiry_time[id-fb024a42-37f3-4ba5-9684-4f40a7910b41]
-tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way[id-06f90388-2d0e-40aa-934c-e9a8833e958a]
-tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers[id-aa467252-44f3-472a-b5ae-5b57c3c9c147]
-tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container[id-1a9ab572-1b66-4981-8c21-416e2a5e6011]
-tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself[id-2248abba-415d-410b-9c30-22dff9cd6e67]
-tempest.api.object_storage.test_object_services.ObjectTest.test_create_object[id-5b4ce26f-3545-46c9-a2ba-5754358a4c62,smoke]
-tempest.api.object_storage.test_object_services.ObjectTest.test_delete_object[id-17738d45-03bd-4d45-9e0b-7b2f58f98687]
-tempest.api.object_storage.test_object_services.ObjectTest.test_get_object[id-02610ba7-86b7-4272-9ed8-aa8d417cb3cd,smoke]
-tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_if_different[id-50d01f12-526f-4360-9ac2-75dd508d7b68]
-tempest.api.object_storage.test_object_services.ObjectTest.test_object_upload_in_segments[id-e3e6a64a-9f50-4955-b987-6ce6767c97fb]
-tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_get_object_using_temp_url[id-f91c96d4-1230-4bba-8eb9-84476d18d991]
-tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_put_object_using_temp_url[id-9b08dade-3571-4152-8a4f-a4f2a873a735]
-tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container[id-a151e158-dcbf-4a1f-a1e7-46cd65895a6f]
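
Each entry above carries its idempotent id (and optional attributes such as smoke) in a bracketed suffix; stripping that suffix yields the bare test name a test runner expects. A one-line sketch:

import re

line = ("tempest.api.compute.servers.test_create_server.ServersTestJSON."
        "test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]")
print(re.sub(r'\[.*\]$', '', line))
# -> tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers
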
diff --git a/testcases/OpenStack/tempest/gen_tempest_conf.py b/testcases/OpenStack/tempest/gen_tempest_conf.py
deleted file mode 100755
index ca671d00d..000000000
--- a/testcases/OpenStack/tempest/gen_tempest_conf.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Execute Multisite Tempest test cases
-##
-
-import ConfigParser
-import os
-import re
-import shutil
-import functest.utils.functest_utils as ft_utils
-import functest.utils.functest_logger as ft_logger
-from run_tempest import configure_tempest
-from run_tempest import TEMPEST_RESULTS_DIR
-
-logger = ft_logger.Logger("multisite").getLogger()
-
-
-def configure_tempest_multisite(deployment_dir):
- """
- Add/update needed parameters into tempest.conf file generated by Rally
- """
- logger.debug("configure the tempest")
- configure_tempest(deployment_dir)
-
- logger.debug("Finding tempest.conf file...")
- tempest_conf_file = deployment_dir + "/tempest.conf"
- if not os.path.isfile(tempest_conf_file):
- logger.error("Tempest configuration file %s NOT found."
- % tempest_conf_file)
- exit(-1)
-
- # Copy tempest.conf to /home/opnfv/functest/results/tempest/
- cur_path = os.path.split(os.path.realpath(__file__))[0]
- shutil.copyfile(tempest_conf_file, cur_path + '/tempest_multisite.conf')
- tempest_conf_file = cur_path + "/tempest_multisite.conf"
-
- logger.debug("Updating selected tempest.conf parameters...")
- config = ConfigParser.RawConfigParser()
- config.read(tempest_conf_file)
-
- config.set('service_available', 'kingbird', 'true')
- cmd = "openstack endpoint show kingbird | grep publicurl |\
- awk '{print $4}' | awk -F '/' '{print $4}'"
-    kingbird_api_version = os.popen(cmd).read().strip()
- if os.environ.get("INSTALLER_TYPE") == 'fuel':
- # For MOS based setup, the service is accessible
- # via bind host
- kingbird_conf_path = "/etc/kingbird/kingbird.conf"
- installer_type = os.getenv('INSTALLER_TYPE', 'Unknown')
- installer_ip = os.getenv('INSTALLER_IP', 'Unknown')
- installer_username = ft_utils.get_functest_config(
- "multisite." + installer_type +
- "_environment.installer_username")
- installer_password = ft_utils.get_functest_config(
- "multisite." + installer_type +
- "_environment.installer_password")
-
- ssh_options = "-o UserKnownHostsFile=/dev/null -o \
- StrictHostKeyChecking=no"
-
- # Get the controller IP from the fuel node
- cmd = 'sshpass -p %s ssh 2>/dev/null %s %s@%s \
- \'fuel node --env 1| grep controller | grep "True\| 1" \
- | awk -F\| "{print \$5}"\'' % (installer_password,
- ssh_options,
- installer_username,
- installer_ip)
- multisite_controller_ip = \
- "".join(os.popen(cmd).read().split())
-
- # Login to controller and get bind host details
- cmd = 'sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" \
- grep -e "^bind_" %s \\""' % (installer_password,
- ssh_options,
- installer_username,
- installer_ip,
- multisite_controller_ip,
- kingbird_conf_path)
- bind_details = os.popen(cmd).read()
- bind_details = "".join(bind_details.split())
- # Extract port number from the bind details
- bind_port = re.findall(r"\D(\d{4})", bind_details)[0]
- # Extract ip address from the bind details
- bind_host = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",
- bind_details)[0]
- kingbird_endpoint_url = "http://" + bind_host + ":" + bind_port + \
- "/"
- else:
- cmd = "openstack endpoint show kingbird | grep publicurl |\
- awk '{print $4}' | awk -F '/' '{print $3}'"
-        kingbird_endpoint_url = os.popen(cmd).read().strip()
-
- try:
- config.add_section("kingbird")
- except Exception:
-        logger.info('kingbird section already exists')
- config.set('kingbird', 'endpoint_type', 'publicURL')
- config.set('kingbird', 'TIME_TO_SYNC', '20')
- config.set('kingbird', 'endpoint_url', kingbird_endpoint_url)
- config.set('kingbird', 'api_version', kingbird_api_version)
- with open(tempest_conf_file, 'wb') as config_file:
- config.write(config_file)
-
- return True
-
-
-def main():
-
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
-
- deployment_dir = ft_utils.get_deployment_dir()
- configure_tempest_multisite(deployment_dir)
-
-
-if __name__ == '__main__':
- main()
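
The two regexes in configure_tempest_multisite() deserve a worked example: after whitespace is squeezed out, the first grabs the first four-digit run preceded by a non-digit (the port), the second the first dotted quad (the host). Applied to an invented kingbird.conf excerpt:

import re

bind_details = "bind_host=192.168.0.2bind_port=8118"  # invented, already stripped
bind_port = re.findall(r"\D(\d{4})", bind_details)[0]               # '8118'
bind_host = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",
                       bind_details)[0]                             # '192.168.0.2'
print("http://" + bind_host + ":" + bind_port + "/")
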
diff --git a/testcases/OpenStack/tempest/run_tempest.py b/testcases/OpenStack/tempest/run_tempest.py
deleted file mode 100755
index d2c01c604..000000000
--- a/testcases/OpenStack/tempest/run_tempest.py
+++ /dev/null
@@ -1,471 +0,0 @@
-#!/usr/bin/env python
-#
-# Description:
-# Runs tempest and pushes the results to the DB
-#
-# Authors:
-# morgan.richomme@orange.com
-# jose.lausuch@ericsson.com
-# viktor.tikkanen@nokia.com
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import ConfigParser
-import os
-import re
-import shutil
-import subprocess
-import sys
-import time
-
-import argparse
-import yaml
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-
-modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing',
- 'identity', 'image', 'network', 'object_storage', 'orchestration',
- 'telemetry', 'volume', 'custom', 'defcore', 'feature_multisite']
-
-""" tests configuration """
-parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug",
- help="Debug mode",
- action="store_true")
-parser.add_argument("-s", "--serial",
- help="Run tests in one thread",
- action="store_true")
-parser.add_argument("-m", "--mode",
- help="Tempest test mode [smoke, all]",
- default="smoke")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-parser.add_argument("-n", "--noclean",
- help="Don't clean the created resources for this test.",
- action="store_true")
-parser.add_argument("-c", "--conf",
- help="User-specified Tempest config file location",
- default="")
-
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("run_tempest").getLogger()
-
-TEST_DB = ft_utils.get_functest_config('results.test_db_url')
-
-MODE = "smoke"
-GLANCE_IMAGE_NAME = \
- ft_utils.get_functest_config('general.openstack.image_name')
-GLANCE_IMAGE_FILENAME = \
- ft_utils.get_functest_config('general.openstack.image_file_name')
-GLANCE_IMAGE_FORMAT = \
- ft_utils.get_functest_config('general.openstack.image_disk_format')
-GLANCE_IMAGE_PATH = \
- ft_utils.get_functest_config('general.directories.dir_functest_data') + \
- "/" + GLANCE_IMAGE_FILENAME
-IMAGE_ID = None
-IMAGE_ID_ALT = None
-
-FLAVOR_NAME = \
- ft_utils.get_functest_config('general.openstack.flavor_name')
-FLAVOR_RAM = ft_utils.get_functest_config('general.openstack.flavor_ram')
-FLAVOR_DISK = ft_utils.get_functest_config('general.openstack.flavor_disk')
-FLAVOR_VCPUS = ft_utils.get_functest_config('general.openstack.flavor_vcpus')
-FLAVOR_ID = None
-FLAVOR_ID_ALT = None
-
-PRIVATE_NET_NAME = \
- ft_utils.get_functest_config('tempest.private_net_name')
-PRIVATE_SUBNET_NAME = \
- ft_utils.get_functest_config('tempest.private_subnet_name')
-PRIVATE_SUBNET_CIDR = \
- ft_utils.get_functest_config('tempest.private_subnet_cidr')
-ROUTER_NAME = \
- ft_utils.get_functest_config('tempest.router_name')
-TENANT_NAME = \
- ft_utils.get_functest_config('tempest.identity.tenant_name')
-TENANT_DESCRIPTION = \
- ft_utils.get_functest_config('tempest.identity.tenant_description')
-USER_NAME = \
- ft_utils.get_functest_config('tempest.identity.user_name')
-USER_PASSWORD = \
- ft_utils.get_functest_config('tempest.identity.user_password')
-SSH_TIMEOUT = \
- ft_utils.get_functest_config('tempest.validation.ssh_timeout')
-USE_CUSTOM_IMAGES = \
- ft_utils.get_functest_config('tempest.use_custom_images')
-USE_CUSTOM_FLAVORS = \
- ft_utils.get_functest_config('tempest.use_custom_flavors')
-
-DEPLOYMENT_NAME = \
- ft_utils.get_functest_config('rally.deployment_name')
-RALLY_INSTALLATION_DIR = \
- ft_utils.get_functest_config('general.directories.dir_rally_inst')
-
-RESULTS_DIR = \
- ft_utils.get_functest_config('general.directories.dir_results')
-TEMPEST_RESULTS_DIR = RESULTS_DIR + '/tempest'
-
-REPO_PATH = ft_utils.FUNCTEST_REPO + '/'
-TEST_LIST_DIR = \
- ft_utils.get_functest_config('general.directories.dir_tempest_cases')
-TEMPEST_CUSTOM = REPO_PATH + TEST_LIST_DIR + 'test_list.txt'
-TEMPEST_BLACKLIST = REPO_PATH + TEST_LIST_DIR + 'blacklist.txt'
-TEMPEST_DEFCORE = REPO_PATH + TEST_LIST_DIR + 'defcore_req.txt'
-TEMPEST_RAW_LIST = TEMPEST_RESULTS_DIR + '/test_raw_list.txt'
-TEMPEST_LIST = TEMPEST_RESULTS_DIR + '/test_list.txt'
-
-
-def get_info(file_result):
- test_run = ""
- duration = ""
- test_failed = ""
-
- p = subprocess.Popen('cat tempest.log',
- shell=True, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- for line in p.stdout.readlines():
- # print line,
- if (len(test_run) < 1):
- test_run = re.findall("[0-9]*\.[0-9]*s", line)
- if (len(duration) < 1):
- duration = re.findall("[0-9]*\ tests", line)
- regexp = r"(failures=[0-9]+)"
- if (len(test_failed) < 1):
- test_failed = re.findall(regexp, line)
-
- logger.debug("test_run:" + test_run)
- logger.debug("duration:" + duration)
-
-
-def create_tempest_resources():
- keystone_client = os_utils.get_keystone_client()
-
- logger.debug("Creating tenant and user for Tempest suite")
- tenant_id = os_utils.create_tenant(keystone_client,
- TENANT_NAME,
- TENANT_DESCRIPTION)
- if not tenant_id:
- logger.error("Error : Failed to create %s tenant" % TENANT_NAME)
-
- user_id = os_utils.create_user(keystone_client, USER_NAME, USER_PASSWORD,
- None, tenant_id)
- if not user_id:
- logger.error("Error : Failed to create %s user" % USER_NAME)
-
- logger.debug("Creating private network for Tempest suite")
- network_dic = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
- PRIVATE_SUBNET_NAME,
- ROUTER_NAME,
- PRIVATE_SUBNET_CIDR)
- if not network_dic:
- exit(1)
-
- if USE_CUSTOM_IMAGES:
-        # adding an alternative image should be trivial if we ever need one
- logger.debug("Creating image for Tempest suite")
- global IMAGE_ID
- _, IMAGE_ID = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
- GLANCE_IMAGE_PATH,
- GLANCE_IMAGE_FORMAT)
- if not IMAGE_ID:
- exit(-1)
-
- if USE_CUSTOM_FLAVORS:
-        # adding an alternative flavor should be trivial if we ever need one
- logger.debug("Creating flavor for Tempest suite")
- global FLAVOR_ID
- _, FLAVOR_ID = os_utils.get_or_create_flavor(FLAVOR_NAME,
- FLAVOR_RAM,
- FLAVOR_DISK,
- FLAVOR_VCPUS)
- if not FLAVOR_ID:
- exit(-1)
-
-
-def configure_tempest(deployment_dir):
- """
- Add/update needed parameters into tempest.conf file generated by Rally
- """
-
- tempest_conf_file = deployment_dir + "/tempest.conf"
- if os.path.isfile(tempest_conf_file):
- logger.debug("Deleting old tempest.conf file...")
- os.remove(tempest_conf_file)
-
- logger.debug("Generating new tempest.conf file...")
- cmd = "rally verify genconfig"
- ft_utils.execute_command(cmd)
-
- logger.debug("Finding tempest.conf file...")
- if not os.path.isfile(tempest_conf_file):
- logger.error("Tempest configuration file %s NOT found."
- % tempest_conf_file)
- exit(-1)
-
- logger.debug("Updating selected tempest.conf parameters...")
- config = ConfigParser.RawConfigParser()
- config.read(tempest_conf_file)
- config.set('compute', 'fixed_network_name', PRIVATE_NET_NAME)
- if USE_CUSTOM_IMAGES:
- if IMAGE_ID is not None:
- config.set('compute', 'image_ref', IMAGE_ID)
- if IMAGE_ID_ALT is not None:
- config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
- if USE_CUSTOM_FLAVORS:
- if FLAVOR_ID is not None:
- config.set('compute', 'flavor_ref', FLAVOR_ID)
- if FLAVOR_ID_ALT is not None:
- config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
- config.set('identity', 'tenant_name', TENANT_NAME)
- config.set('identity', 'username', USER_NAME)
- config.set('identity', 'password', USER_PASSWORD)
- config.set('validation', 'ssh_timeout', SSH_TIMEOUT)
-
- if os.getenv('OS_ENDPOINT_TYPE') is not None:
- services_list = ['compute', 'volume', 'image', 'network',
- 'data-processing', 'object-storage', 'orchestration']
- sections = config.sections()
- for service in services_list:
- if service not in sections:
- config.add_section(service)
- config.set(service, 'endpoint_type',
- os.environ.get("OS_ENDPOINT_TYPE"))
-
- with open(tempest_conf_file, 'wb') as config_file:
- config.write(config_file)
-
- # Copy tempest.conf to /home/opnfv/functest/results/tempest/
- shutil.copyfile(tempest_conf_file, TEMPEST_RESULTS_DIR + '/tempest.conf')
- return True
-
-
-def read_file(filename):
- with open(filename) as src:
- return [line.strip() for line in src.readlines()]
-
-
-def generate_test_list(deployment_dir, mode):
- logger.debug("Generating test case list...")
- if mode == 'defcore':
- shutil.copyfile(TEMPEST_DEFCORE, TEMPEST_RAW_LIST)
- elif mode == 'custom':
- if os.path.isfile(TEMPEST_CUSTOM):
- shutil.copyfile(TEMPEST_CUSTOM, TEMPEST_RAW_LIST)
- else:
- logger.error("Tempest test list file %s NOT found."
- % TEMPEST_CUSTOM)
- exit(-1)
- else:
- if mode == 'smoke':
- testr_mode = "smoke"
- elif mode == 'feature_multisite':
- testr_mode = " | grep -i kingbird "
- elif mode == 'full':
- testr_mode = ""
- else:
- testr_mode = 'tempest.api.' + mode
- cmd = ("cd " + deployment_dir + ";" + "testr list-tests " +
- testr_mode + ">" + TEMPEST_RAW_LIST + ";cd")
- ft_utils.execute_command(cmd)
-
-
-def apply_tempest_blacklist():
- logger.debug("Applying tempest blacklist...")
- cases_file = read_file(TEMPEST_RAW_LIST)
- result_file = open(TEMPEST_LIST, 'w')
- black_tests = []
- try:
- installer_type = os.getenv('INSTALLER_TYPE')
- deploy_scenario = os.getenv('DEPLOY_SCENARIO')
-        if installer_type and deploy_scenario:
- # if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the file
- black_list_file = open(TEMPEST_BLACKLIST)
- black_list_yaml = yaml.safe_load(black_list_file)
- black_list_file.close()
- for item in black_list_yaml:
- scenarios = item['scenarios']
- installers = item['installers']
- if (deploy_scenario in scenarios and
- installer_type in installers):
- tests = item['tests']
- for test in tests:
- black_tests.append(test)
- break
- except:
- black_tests = []
- logger.debug("Tempest blacklist file does not exist.")
-
- for cases_line in cases_file:
- for black_tests_line in black_tests:
- if black_tests_line in cases_line:
- break
- else:
- result_file.write(str(cases_line) + '\n')
- result_file.close()
-
-
-def run_tempest(OPTION):
- #
- # the "main" function of the script which launches Rally to run Tempest
- # :param option: tempest option (smoke, ..)
- # :return: void
- #
- logger.info("Starting Tempest test suite: '%s'." % OPTION)
- start_time = time.time()
- stop_time = start_time
- cmd_line = "rally verify start " + OPTION + " --system-wide"
-
- header = ("Tempest environment:\n"
- " Installer: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
- (os.getenv('INSTALLER_TYPE', 'Unknown'),
- os.getenv('DEPLOY_SCENARIO', 'Unknown'),
- os.getenv('NODE_NAME', 'Unknown'),
- time.strftime("%a %b %d %H:%M:%S %Z %Y")))
-
- f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+')
- f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+')
- f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+')
- f_env.write(header)
-
- # subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr)
- p = subprocess.Popen(
- cmd_line, shell=True,
- stdout=subprocess.PIPE,
- stderr=f_stderr,
- bufsize=1)
-
- with p.stdout:
- for line in iter(p.stdout.readline, b''):
- if re.search("\} tempest\.", line):
- logger.info(line.replace('\n', ''))
- f_stdout.write(line)
- p.wait()
-
- f_stdout.close()
- f_stderr.close()
- f_env.close()
-
- cmd_line = "rally verify show"
- output = ""
- p = subprocess.Popen(
- cmd_line, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- for line in p.stdout:
- if re.search("Tests\:", line):
- break
- output += line
- logger.info(output)
-
- cmd_line = "rally verify list"
- cmd = os.popen(cmd_line)
- output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|")
- # Format:
- # | UUID | Deployment UUID | smoke | tests | failures | Created at |
- # Duration | Status |
- num_tests = output[4]
- num_failures = output[5]
- time_start = output[6]
- duration = output[7]
-    # Compute duration (let's assume it does not take more than 60 min)
- dur_min = int(duration.split(':')[1])
- dur_sec_float = float(duration.split(':')[2])
- dur_sec_int = int(round(dur_sec_float, 0))
- dur_sec_int = dur_sec_int + 60 * dur_min
- stop_time = time.time()
-
- try:
- diff = (int(num_tests) - int(num_failures))
- success_rate = 100 * diff / int(num_tests)
- except:
- success_rate = 0
-
- if 'smoke' in args.mode:
- case_name = 'tempest_smoke_serial'
- elif 'feature' in args.mode:
- case_name = args.mode.replace("feature_", "")
- else:
- case_name = 'tempest_full_parallel'
-
- status = ft_utils.check_success_rate(case_name, success_rate)
- logger.info("Tempest %s success_rate is %s%%, is marked as %s"
- % (case_name, success_rate, status))
-
- # Push results in payload of testcase
- if args.report:
-        # add the tests in error to the details section
-        # (it should be possible to do this during the test run)
- logger.debug("Pushing tempest results into DB...")
- with open(TEMPEST_RESULTS_DIR + "/tempest.log", 'r') as myfile:
- output = myfile.read()
- error_logs = ""
-
- for match in re.findall('(.*?)[. ]*FAILED', output):
- error_logs += match
-
- # Generate json results for DB
- json_results = {"timestart": time_start, "duration": dur_sec_int,
- "tests": int(num_tests), "failures": int(num_failures),
- "errors": error_logs}
- logger.info("Results: " + str(json_results))
- # split Tempest smoke and full
-
- try:
- ft_utils.push_results_to_db("functest",
- case_name,
- start_time,
- stop_time,
- status,
- json_results)
- except:
- logger.error("Error pushing results into Database '%s'"
- % sys.exc_info()[0])
-
- if status == "PASS":
- return 0
- else:
- return -1
-
-
-def main():
- global MODE
-
- if not (args.mode in modes):
- logger.error("Tempest mode not valid. "
- "Possible values are:\n" + str(modes))
- exit(-1)
-
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
-
- deployment_dir = ft_utils.get_deployment_dir()
- create_tempest_resources()
-
- if "" == args.conf:
- MODE = ""
- configure_tempest(deployment_dir)
- else:
- MODE = " --tempest-config " + args.conf
-
- generate_test_list(deployment_dir, args.mode)
- apply_tempest_blacklist()
-
- MODE += " --tests-file " + TEMPEST_LIST
- if args.serial:
- MODE += " --concur 1"
-
- ret_val = run_tempest(MODE)
- if ret_val != 0:
- sys.exit(-1)
-
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
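
The result parsing in run_tempest() leans on the column layout of `rally verify list`; a sketch with an invented row shows what ends up in each field once the spaces are squeezed out:

row = ("| 1f2e3d4c | 9a8b7c6d | True | 111 | 3 "
       "| 2016-11-08 14:18:12 | 0:12:34.567890 | finished |")
fields = row.replace(" ", "").split("|")
num_tests, num_failures = fields[4], fields[5]        # '111', '3'
duration = fields[7]                                  # '0:12:34.567890'
dur_sec = int(duration.split(':')[1]) * 60 + \
    int(round(float(duration.split(':')[2])))         # 755 seconds
print("%s tests, %s failures, %ss" % (num_tests, num_failures, dur_sec))
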
diff --git a/testcases/OpenStack/vPing/ping.sh b/testcases/OpenStack/vPing/ping.sh
deleted file mode 100755
index 693b86825..000000000
--- a/testcases/OpenStack/vPing/ping.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-while true; do
-    ping -c 1 $1 >/dev/null 2>&1
- RES=$?
- if [ "Z$RES" = "Z0" ] ; then
- echo 'vPing OK'
- break
- else
- echo 'vPing KO'
- fi
- sleep 1
-done \ No newline at end of file
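
For readers less at home in shell, the same loop in Python: one ICMP echo request per second until the first reply (this mirrors ping.sh; it is not part of the test flow):

import os
import subprocess
import time

def wait_for_first_reply(ip):
    devnull = open(os.devnull, 'w')
    while True:
        # One echo request; both output streams silenced, as intended above.
        if subprocess.call(['ping', '-c', '1', ip],
                           stdout=devnull, stderr=subprocess.STDOUT) == 0:
            print('vPing OK')
            break
        print('vPing KO')
        time.sleep(1)
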
diff --git a/testcases/OpenStack/vPing/vping.py b/testcases/OpenStack/vPing/vping.py
deleted file mode 100755
index 90f664568..000000000
--- a/testcases/OpenStack/vPing/vping.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# 0.1: This script boots VM1 and allocates an IP address from Nova.
-#      Later, VM2 boots and executes cloud-init to ping VM1.
-#      After a successful ping, both VMs are deleted.
-# 0.2: measure test duration and publish results in JSON format
-# 0.3: adapt push to DB after Test API refactoring
-#
-#
-import datetime
-import time
-
-import argparse
-import functest.utils.functest_logger as ft_logger
-
-import vping_util as util
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-parser.add_argument("-m", "--mode", default='ssh',
- help="vPing mode: userdata or ssh",
- action="store")
-
-args = parser.parse_args()
-
-
-def main():
- if args.mode == 'ssh':
- case = 'vping_ssh'
- else:
- case = 'vping_userdata'
-
- logger = ft_logger.Logger(case).getLogger()
-
- util.init(logger)
-
- util.check_repo_exist()
-
- vmname_1 = util.get_vmname_1()
- vmname_2 = util.get_vmname_2()
-
- image_id = util.create_image()
-
- flavor = util.get_flavor()
-
- network_id = util.create_network_full()
-
- sg_id = util.create_security_group()
-
- util.delete_exist_vms()
-
- start_time = time.time()
- logger.info("vPing Start Time:'%s'" % (
- datetime.datetime.fromtimestamp(start_time).strftime(
- '%Y-%m-%d %H:%M:%S')))
-
- vm1 = util.boot_vm(case,
- vmname_1,
- image_id,
- flavor,
- network_id,
- None,
- sg_id)
- test_ip = util.get_test_ip(vm1)
- vm2 = util.boot_vm(case,
- vmname_2,
- image_id,
- flavor,
- network_id,
- test_ip,
- sg_id)
-
- EXIT_CODE, stop_time = util.do_vping(case, vm2, test_ip)
- details = util.check_result(EXIT_CODE,
- start_time,
- stop_time)
- util.push_result(args.report,
- case,
- start_time,
- stop_time,
- details)
-
- exit(EXIT_CODE)
-
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/OpenStack/vPing/vping_util.py b/testcases/OpenStack/vPing/vping_util.py
deleted file mode 100644
index cf5a28dbd..000000000
--- a/testcases/OpenStack/vPing/vping_util.py
+++ /dev/null
@@ -1,461 +0,0 @@
-import os
-import pprint
-import re
-import sys
-import time
-
-import paramiko
-from scp import SCPClient
-
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-FUNCTEST_REPO = ft_utils.FUNCTEST_REPO
-
-NAME_VM_1 = ft_utils.get_functest_config('vping.vm_name_1')
-NAME_VM_2 = ft_utils.get_functest_config('vping.vm_name_2')
-
-VM_BOOT_TIMEOUT = 180
-VM_DELETE_TIMEOUT = 100
-PING_TIMEOUT = ft_utils.get_functest_config('vping.ping_timeout')
-
-GLANCE_IMAGE_NAME = ft_utils.get_functest_config('vping.image_name')
-GLANCE_IMAGE_FILENAME = \
- ft_utils.get_functest_config('general.openstack.image_file_name')
-GLANCE_IMAGE_FORMAT = \
- ft_utils.get_functest_config('general.openstack.image_disk_format')
-GLANCE_IMAGE_PATH = \
- ft_utils.get_functest_config('general.directories.dir_functest_data') + \
- "/" + GLANCE_IMAGE_FILENAME
-
-
-FLAVOR = ft_utils.get_functest_config('vping.vm_flavor')
-
-# NEUTRON Private Network parameters
-PRIVATE_NET_NAME = \
- ft_utils.get_functest_config('vping.vping_private_net_name')
-PRIVATE_SUBNET_NAME = \
- ft_utils.get_functest_config('vping.vping_private_subnet_name')
-PRIVATE_SUBNET_CIDR = \
- ft_utils.get_functest_config('vping.vping_private_subnet_cidr')
-ROUTER_NAME = ft_utils.get_functest_config('vping.vping_router_name')
-
-SECGROUP_NAME = ft_utils.get_functest_config('vping.vping_sg_name')
-SECGROUP_DESCR = ft_utils.get_functest_config('vping.vping_sg_descr')
-
-
-neutron_client = None
-glance_client = None
-nova_client = None
-logger = None
-
-pp = pprint.PrettyPrinter(indent=4)
-
-
-def pMsg(value):
- """pretty printing"""
- pp.pprint(value)
-
-
-def check_repo_exist():
- if not os.path.exists(FUNCTEST_REPO):
- logger.error("Functest repository not found '%s'" % FUNCTEST_REPO)
- exit(-1)
-
-
-def get_vmname_1():
- return NAME_VM_1
-
-
-def get_vmname_2():
- return NAME_VM_2
-
-
-def init(vping_logger):
- global nova_client
- nova_client = os_utils.get_nova_client()
- global neutron_client
- neutron_client = os_utils.get_neutron_client()
- global glance_client
- glance_client = os_utils.get_glance_client()
- global logger
- logger = vping_logger
-
-
-def waitVmActive(nova, vm):
-
- # sleep and wait for VM status change
- sleep_time = 3
- count = VM_BOOT_TIMEOUT / sleep_time
- while True:
- status = os_utils.get_instance_status(nova, vm)
- logger.debug("Status: %s" % status)
- if status == "ACTIVE":
- return True
- if status == "ERROR" or status == "error":
- return False
- if count == 0:
- logger.debug("Booting a VM timed out...")
- return False
- count -= 1
- time.sleep(sleep_time)
- return False
-
-
-def create_security_group():
- sg_id = os_utils.get_security_group_id(neutron_client,
- SECGROUP_NAME)
- if sg_id != '':
- logger.info("Using existing security group '%s'..." % SECGROUP_NAME)
- else:
- logger.info("Creating security group '%s'..." % SECGROUP_NAME)
- SECGROUP = os_utils.create_security_group(neutron_client,
- SECGROUP_NAME,
- SECGROUP_DESCR)
- if not SECGROUP:
- logger.error("Failed to create the security group...")
- return False
-
- sg_id = SECGROUP['id']
-
- logger.debug("Security group '%s' with ID=%s created successfully."
- % (SECGROUP['name'], sg_id))
-
- logger.debug("Adding ICMP rules in security group '%s'..."
- % SECGROUP_NAME)
- if not os_utils.create_secgroup_rule(neutron_client, sg_id,
- 'ingress', 'icmp'):
- logger.error("Failed to create the security group rule...")
- return False
-
- logger.debug("Adding SSH rules in security group '%s'..."
- % SECGROUP_NAME)
- if not os_utils.create_secgroup_rule(neutron_client, sg_id,
- 'ingress', 'tcp',
- '22', '22'):
- logger.error("Failed to create the security group rule...")
- return False
-
- if not os_utils.create_secgroup_rule(
- neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
- logger.error("Failed to create the security group rule...")
- return False
- return sg_id
-
-
-def create_image():
- _, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
- GLANCE_IMAGE_PATH,
- GLANCE_IMAGE_FORMAT)
- if not image_id:
- exit(-1)
-
- return image_id
-
-
-def get_flavor():
- EXIT_CODE = -1
-
- # Check if the given flavor exists
- try:
- flavor = nova_client.flavors.find(name=FLAVOR)
- logger.info("Using existing Flavor '%s'..." % FLAVOR)
- return flavor
- except:
- logger.error("Flavor '%s' not found." % FLAVOR)
- logger.info("Available flavors are: ")
-        pMsg(nova_client.flavors.list())
- exit(EXIT_CODE)
-
-
-def create_network_full():
- EXIT_CODE = -1
-
- network_dic = os_utils.create_network_full(neutron_client,
- PRIVATE_NET_NAME,
- PRIVATE_SUBNET_NAME,
- ROUTER_NAME,
- PRIVATE_SUBNET_CIDR)
-
- if not network_dic:
- logger.error(
- "There has been a problem when creating the neutron network")
- exit(EXIT_CODE)
- network_id = network_dic["net_id"]
- return network_id
-
-
-def delete_exist_vms():
- servers = nova_client.servers.list()
- for server in servers:
- if server.name == NAME_VM_1 or server.name == NAME_VM_2:
- logger.info("Instance %s found. Deleting..." % server.name)
- server.delete()
-
-
-def is_userdata(case):
- return case == 'vping_userdata'
-
-
-def is_ssh(case):
- return case == 'vping_ssh'
-
-
-def boot_vm(case, name, image_id, flavor, network_id, test_ip, sg_id):
- EXIT_CODE = -1
-
- config = dict()
- config['name'] = name
- config['flavor'] = flavor
- config['image'] = image_id
- config['nics'] = [{"net-id": network_id}]
- if is_userdata(case):
- config['config_drive'] = True
- if name == NAME_VM_2:
- u = ("#!/bin/sh\n\n"
- "while true; do\n"
-                 " ping -c 1 %s >/dev/null 2>&1\n"
- " RES=$?\n"
- " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
- " echo 'vPing OK'\n"
- " break\n"
- " else\n"
- " echo 'vPing KO'\n"
- " fi\n"
- " sleep 1\n"
- "done\n" % test_ip)
- config['userdata'] = u
-
- logger.info("Creating instance '%s'..." % name)
- logger.debug("Configuration: %s" % config)
- vm = nova_client.servers.create(**config)
-
- # wait until VM status is active
- if not waitVmActive(nova_client, vm):
-
- logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
- name, os_utils.get_instance_status(nova_client, vm)))
- exit(EXIT_CODE)
- else:
- logger.info("Instance '%s' is ACTIVE." % name)
-
- add_secgroup(name, vm.id, sg_id)
-
- return vm
-
-
-def get_test_ip(vm):
- test_ip = vm.networks.get(PRIVATE_NET_NAME)[0]
- logger.debug("Instance '%s' got %s" % (vm.name, test_ip))
- return test_ip
-
-
-def add_secgroup(vmname, vm_id, sg_id):
- logger.info("Adding '%s' to security group '%s'..." %
- (vmname, SECGROUP_NAME))
- os_utils.add_secgroup_to_instance(nova_client, vm_id, sg_id)
-
-
-def add_float_ip(vm):
- EXIT_CODE = -1
-
- logger.info("Creating floating IP for VM '%s'..." % NAME_VM_2)
- floatip_dic = os_utils.create_floating_ip(neutron_client)
- floatip = floatip_dic['fip_addr']
-
- if floatip is None:
- logger.error("Cannot create floating IP.")
- exit(EXIT_CODE)
- logger.info("Floating IP created: '%s'" % floatip)
-
- logger.info("Associating floating ip: '%s' to VM '%s' "
- % (floatip, NAME_VM_2))
- if not os_utils.add_floating_ip(nova_client, vm.id, floatip):
- logger.error("Cannot associate floating IP to VM.")
- exit(EXIT_CODE)
-
- return floatip
-
-
-def establish_ssh(vm, floatip):
- EXIT_CODE = -1
-
- logger.info("Trying to establish SSH connection to %s..." % floatip)
- username = 'cirros'
- password = 'cubswin:)'
- ssh = paramiko.SSHClient()
- ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
- timeout = 50
- nolease = False
- got_ip = False
- discover_count = 0
- cidr_first_octet = PRIVATE_SUBNET_CIDR.split('.')[0]
- while timeout > 0:
- try:
- ssh.connect(floatip, username=username,
- password=password, timeout=2)
- logger.debug("SSH connection established to %s." % floatip)
- break
- except:
- logger.debug("Waiting for %s..." % floatip)
- time.sleep(6)
- timeout -= 1
-
- console_log = vm.get_console_output()
-
- # log each "Sending discover" that appears in the console output
- if (len(re.findall("Sending discover", console_log)) >
- discover_count and not got_ip):
- discover_count += 1
- logger.debug("Console-log '%s': Sending discover..."
- % NAME_VM_2)
-
- # check whether eth0 got an IP; the relevant line looks like
- # "inet addr:192.168."...
- # if the DHCP agent fails to assign an IP, this line never appears
- if "inet addr:" + cidr_first_octet in console_log and not got_ip:
- got_ip = True
- logger.debug("The instance '%s' succeeded to get the IP "
- "from the dhcp agent." % NAME_VM_2)
-
- # if DHCP doesn't work, the log shows "No lease, failing" and the test will fail
- if "No lease, failing" in console_log and not nolease and not got_ip:
- nolease = True
- logger.debug("Console-log '%s': No lease, failing..."
- % NAME_VM_2)
- logger.info("The instance failed to get an IP from the "
- "DHCP agent. The test will probably timeout...")
-
- if timeout == 0: # 300 sec timeout (5 min)
- logger.error("Cannot establish connection to IP '%s'. Aborting"
- % floatip)
- exit(EXIT_CODE)
- return ssh
-
-
-def transfer_ping_script(ssh, floatip):
- EXIT_CODE = -1
-
- logger.info("Trying to transfer ping.sh to %s..." % floatip)
- scp = SCPClient(ssh.get_transport())
-
- ping_script = FUNCTEST_REPO + "/testcases/OpenStack/vPing/ping.sh"
- try:
- scp.put(ping_script, "~/")
- except:
- logger.error("Cannot SCP the file '%s' to VM '%s'"
- % (ping_script, floatip))
- exit(EXIT_CODE)
-
- cmd = 'chmod 755 ~/ping.sh'
- (stdin, stdout, stderr) = ssh.exec_command(cmd)
- for line in stdout.readlines():
- print line
-
-
-def do_vping_ssh(ssh, test_ip):
- logger.info("Waiting for ping...")
-
- EXIT_CODE = -1
- sec = 0
- cmd = '~/ping.sh ' + test_ip
- flag = False
-
- while True:
- time.sleep(1)
- (stdin, stdout, stderr) = ssh.exec_command(cmd)
- output = stdout.readlines()
-
- for line in output:
- if "vPing OK" in line:
- logger.info("vPing detected!")
- EXIT_CODE = 0
- flag = True
- break
-
- # check the timeout after the for loop so that an empty command
- # output cannot keep the test looping forever
- if sec == PING_TIMEOUT:
- logger.info("Timeout reached.")
- flag = True
- if flag:
- break
- logger.debug("Pinging %s. Waiting for response..." % test_ip)
- sec += 1
- return EXIT_CODE, time.time()
-
-
-def do_vping_userdata(vm, test_ip):
- logger.info("Waiting for ping...")
- EXIT_CODE = -1
- sec = 0
- metadata_tries = 0
-
- while True:
- time.sleep(1)
- console_log = vm.get_console_output()
- if "vPing OK" in console_log:
- logger.info("vPing detected!")
- EXIT_CODE = 0
- break
- elif ("failed to read iid from metadata" in console_log or
- metadata_tries > 5):
- EXIT_CODE = -2
- break
- elif sec == PING_TIMEOUT:
- logger.info("Timeout reached.")
- break
- elif sec % 10 == 0:
- if "request failed" in console_log:
- logger.debug("It seems userdata is not supported in "
- "nova boot. Waiting a bit...")
- metadata_tries += 1
- else:
- logger.debug("Pinging %s. Waiting for response..." % test_ip)
- sec += 1
-
- return EXIT_CODE, time.time()
-
-
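-# dispatch to the userdata or SSH variant; both return the exit code
-# together with the stop timestamp used for reporting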
-def do_vping(case, vm, test_ip):
- if is_userdata(case):
- return do_vping_userdata(vm, test_ip)
- else:
- floatip = add_float_ip(vm)
- ssh = establish_ssh(vm, floatip)
- transfer_ping_script(ssh, floatip)
- return do_vping_ssh(ssh, test_ip)
-
-
-def check_result(code, start_time, stop_time):
- test_status = "FAIL"
- if code == 0:
- logger.info("vPing OK")
- duration = round(stop_time - start_time, 1)
- logger.info("vPing duration:'%s'" % duration)
- test_status = "PASS"
- elif code == -2:
- duration = 0
- logger.info("Userdata is not supported in nova boot. Aborting test...")
- else:
- duration = 0
- logger.error("vPing FAILED")
-
- details = {'timestart': start_time,
- 'duration': duration,
- 'status': test_status}
-
- return details
-
-
-def push_result(report, case, start_time, stop_time, details):
- if report:
- try:
- logger.debug("Pushing vPing %s results into DB..." % case)
- ft_utils.push_results_to_db('functest',
- case,
- start_time,
- stop_time,
- details['status'],
- details=details)
- except:
- logger.error("Error pushing results into Database '%s'"
- % sys.exc_info()[0])
diff --git a/testcases/__init__.py b/testcases/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/testcases/__init__.py
+++ /dev/null
diff --git a/testcases/features/copper.py b/testcases/features/copper.py
deleted file mode 100755
index ab0162626..000000000
--- a/testcases/features/copper.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import argparse
-import sys
-import time
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
-
-COPPER_REPO = \
- functest_utils.get_functest_config('general.directories.dir_repo_copper')
-RESULTS_DIR = \
- functest_utils.get_functest_config('general.directories.dir_results')
-
-logger = ft_logger.Logger("copper").getLogger()
-
-
-def main():
- cmd = "%s/tests/run.sh %s/tests" % (COPPER_REPO, COPPER_REPO)
-
- start_time = time.time()
-
- log_file = RESULTS_DIR + "/copper.log"
- ret_val = functest_utils.execute_command(cmd,
- output_file=log_file)
-
- stop_time = time.time()
- duration = round(stop_time - start_time, 1)
- if ret_val == 0:
- logger.info("COPPER PASSED")
- test_status = 'PASS'
- else:
- logger.info("COPPER FAILED")
- test_status = 'FAIL'
-
- details = {
- 'timestart': start_time,
- 'duration': duration,
- 'status': test_status,
- }
- functest_utils.logger_test_results("Copper",
- "copper-notification",
- details['status'], details)
- try:
- if args.report:
- functest_utils.push_results_to_db("copper",
- "copper-notification",
- start_time,
- stop_time,
- details['status'],
- details)
- logger.info("COPPER results pushed to DB")
- except:
- logger.error("Error pushing results into Database '%s'"
- % sys.exc_info()[0])
-
- if ret_val != 0:
- sys.exit(-1)
-
- sys.exit(0)
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/features/doctor.py b/testcases/features/doctor.py
deleted file mode 100755
index 00e5c1d6b..000000000
--- a/testcases/features/doctor.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# This script runs the OPNFV Doctor test suite (tests/run.sh in the
-# Doctor repo), measures the test duration and publishes the results
-# in JSON format.
-#
-#
-import argparse
-import os
-import time
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
-
-functest_yaml = functest_utils.get_functest_yaml()
-
-DOCTOR_REPO = \
- functest_utils.get_functest_config('general.directories.dir_repo_doctor')
-RESULTS_DIR = \
- functest_utils.get_functest_config('general.directories.dir_results')
-
-logger = ft_logger.Logger("doctor").getLogger()
-
-
-def main():
- exit_code = -1
-
- # if the image name is explicitly set for the doctor suite, export it
- # as an environment variable
- if 'doctor' in functest_yaml and 'image_name' in functest_yaml['doctor']:
- os.environ["IMAGE_NAME"] = functest_yaml['doctor']['image_name']
-
- cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO
- log_file = RESULTS_DIR + "/doctor.log"
-
- start_time = time.time()
-
- ret = functest_utils.execute_command(cmd,
- info=True,
- output_file=log_file)
-
- stop_time = time.time()
- duration = round(stop_time - start_time, 1)
- if ret == 0:
- logger.info("Doctor test case OK")
- test_status = 'OK'
- exit_code = 0
- else:
- logger.info("Doctor test case FAILED")
- test_status = 'NOK'
-
- details = {
- 'timestart': start_time,
- 'duration': duration,
- 'status': test_status,
- }
- status = "FAIL"
- if details['status'] == "OK":
- status = "PASS"
- functest_utils.logger_test_results("Doctor",
- "doctor-notification",
- status, details)
- if args.report:
- functest_utils.push_results_to_db("doctor",
- "doctor-notification",
- start_time,
- stop_time,
- status,
- details)
- logger.info("Doctor results pushed to DB")
-
- exit(exit_code)
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/features/domino.py b/testcases/features/domino.py
deleted file mode 100755
index 7705c07bd..000000000
--- a/testcases/features/domino.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# This script runs the Domino multinode test suite
-# (tests/run_multinode.sh in the Domino repo), measures the test
-# duration and publishes the results in JSON format; the report
-# flag pushes the results to the DB when needed.
-#
-
-import argparse
-import time
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
-
-
-DOMINO_REPO = \
- ft_utils.get_functest_config('general.directories.dir_repo_domino')
-RESULTS_DIR = \
- ft_utils.get_functest_config('general.directories.dir_results')
-
-logger = ft_logger.Logger("domino").getLogger()
-
-
-def main():
- cmd = 'cd %s && ./tests/run_multinode.sh' % DOMINO_REPO
- log_file = RESULTS_DIR + "/domino.log"
- start_time = time.time()
-
- ret = ft_utils.execute_command(cmd,
- output_file=log_file)
-
- stop_time = time.time()
- duration = round(stop_time - start_time, 1)
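- # a sub-second run means run_multinode.sh did not actually execute
- # anything, so it is reported as SKIPPED rather than OK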
- if ret == 0 and duration > 1:
- logger.info("domino OK")
- test_status = 'OK'
- elif ret == 0 and duration <= 1:
- logger.info("domino TEST SKIPPED")
- test_status = 'SKIPPED'
- else:
- logger.info("domino FAILED")
- test_status = 'NOK'
-
- details = {
- 'timestart': start_time,
- 'duration': duration,
- 'status': test_status,
- }
-
- status = "FAIL"
- if details['status'] == "OK":
- status = "PASS"
- elif details['status'] == "SKIPPED":
- status = "SKIP"
-
- ft_utils.logger_test_results("Domino",
- "domino-multinode",
- status,
- details)
- if args.report:
- if status != "SKIP":
- ft_utils.push_results_to_db("domino",
- "domino-multinode",
- start_time,
- stop_time,
- status,
- details)
- logger.info("Domino results pushed to DB")
-
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/features/multisite.py b/testcases/features/multisite.py
deleted file mode 100755
index 6d492182c..000000000
--- a/testcases/features/multisite.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Execute Multisite Tempest test cases
-#
-import functest.utils.functest_logger as ft_logger
-
-logger = ft_logger.Logger("multisite").getLogger()
-
-
-def main():
- logger.info("multisite OK")
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/features/promise.py b/testcases/features/promise.py
deleted file mode 100755
index cce0f5dc1..000000000
--- a/testcases/features/promise.py
+++ /dev/null
@@ -1,255 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Maintainer : jose.lausuch@ericsson.com
-#
-import argparse
-import json
-import os
-import subprocess
-import time
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as openstack_utils
-import keystoneclient.v2_0.client as ksclient
-from neutronclient.v2_0 import client as ntclient
-import novaclient.client as nvclient
-
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
-
-
-dirs = ft_utils.get_functest_config('general.directories')
-PROMISE_REPO = dirs.get('dir_repo_promise')
-RESULTS_DIR = ft_utils.get_functest_config('general.directories.dir_results')
-
-TENANT_NAME = ft_utils.get_functest_config('promise.tenant_name')
-TENANT_DESCRIPTION = \
- ft_utils.get_functest_config('promise.tenant_description')
-USER_NAME = ft_utils.get_functest_config('promise.user_name')
-USER_PWD = ft_utils.get_functest_config('promise.user_pwd')
-IMAGE_NAME = ft_utils.get_functest_config('promise.image_name')
-FLAVOR_NAME = ft_utils.get_functest_config('promise.flavor_name')
-FLAVOR_VCPUS = ft_utils.get_functest_config('promise.flavor_vcpus')
-FLAVOR_RAM = ft_utils.get_functest_config('promise.flavor_ram')
-FLAVOR_DISK = ft_utils.get_functest_config('promise.flavor_disk')
-
-
-GLANCE_IMAGE_FILENAME = \
- ft_utils.get_functest_config('general.openstack.image_file_name')
-GLANCE_IMAGE_FORMAT = \
- ft_utils.get_functest_config('general.openstack.image_disk_format')
-GLANCE_IMAGE_PATH = \
- ft_utils.get_functest_config('general.directories.dir_functest_data') + \
- "/" + GLANCE_IMAGE_FILENAME
-
-NET_NAME = ft_utils.get_functest_config('promise.network_name')
-SUBNET_NAME = ft_utils.get_functest_config('promise.subnet_name')
-SUBNET_CIDR = ft_utils.get_functest_config('promise.subnet_cidr')
-ROUTER_NAME = ft_utils.get_functest_config('promise.router_name')
-
-
-""" logging configuration """
-logger = ft_logger.Logger("promise").getLogger()
-
-
-def main():
- exit_code = -1
- start_time = time.time()
- ks_creds = openstack_utils.get_credentials("keystone")
- nv_creds = openstack_utils.get_credentials("nova")
- nt_creds = openstack_utils.get_credentials("neutron")
-
- keystone = ksclient.Client(**ks_creds)
-
- user_id = openstack_utils.get_user_id(keystone, ks_creds['username'])
- if user_id == '':
- logger.error("Error : Failed to get id of %s user" %
- ks_creds['username'])
- exit(-1)
-
- logger.info("Creating tenant '%s'..." % TENANT_NAME)
- tenant_id = openstack_utils.create_tenant(
- keystone, TENANT_NAME, TENANT_DESCRIPTION)
- if not tenant_id:
- logger.error("Error : Failed to create %s tenant" % TENANT_NAME)
- exit(-1)
- logger.debug("Tenant '%s' created successfully." % TENANT_NAME)
-
- roles_name = ["admin", "Admin"]
- role_id = ''
- for role_name in roles_name:
- if role_id == '':
- role_id = openstack_utils.get_role_id(keystone, role_name)
-
- if role_id == '':
- logger.error("Error : Failed to get id for %s role" % role_name)
- exit(-1)
-
- logger.info("Adding role '%s' to tenant '%s'..." % (role_id, TENANT_NAME))
- if not openstack_utils.add_role_user(keystone, user_id,
- role_id, tenant_id):
- logger.error("Error : Failed to add %s on tenant %s" %
- (ks_creds['username'], TENANT_NAME))
- exit(-1)
- logger.debug("Role added successfully.")
-
- logger.info("Creating user '%s'..." % USER_NAME)
- user_id = openstack_utils.create_user(
- keystone, USER_NAME, USER_PWD, None, tenant_id)
-
- if not user_id:
- logger.error("Error : Failed to create %s user" % USER_NAME)
- exit(-1)
- logger.debug("User '%s' created successfully." % USER_NAME)
-
- logger.info("Updating OpenStack credentials...")
- ks_creds.update({
- "username": TENANT_NAME,
- "password": TENANT_NAME,
- "tenant_name": TENANT_NAME,
- })
-
- nt_creds.update({
- "tenant_name": TENANT_NAME,
- })
-
- nv_creds.update({
- "project_id": TENANT_NAME,
- })
-
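- # the nova and neutron clients below are built from the updated
- # credentials, i.e. they act as the newly created tenant and user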
- glance = openstack_utils.get_glance_client()
- nova = nvclient.Client("2", **nv_creds)
-
- logger.info("Creating image '%s' from '%s'..." % (IMAGE_NAME,
- GLANCE_IMAGE_PATH))
- image_id = openstack_utils.create_glance_image(glance,
- IMAGE_NAME,
- GLANCE_IMAGE_PATH)
- if not image_id:
- logger.error("Failed to create the Glance image...")
- exit(-1)
- logger.debug("Image '%s' with ID '%s' created successfully." % (IMAGE_NAME,
- image_id))
- flavor_id = openstack_utils.get_flavor_id(nova, FLAVOR_NAME)
- if flavor_id == '':
- logger.info("Creating flavor '%s'..." % FLAVOR_NAME)
- flavor_id = openstack_utils.create_flavor(nova,
- FLAVOR_NAME,
- FLAVOR_RAM,
- FLAVOR_DISK,
- FLAVOR_VCPUS)
- if not flavor_id:
- logger.error("Failed to create the Flavor...")
- exit(-1)
- logger.debug("Flavor '%s' with ID '%s' created successfully." %
- (FLAVOR_NAME, flavor_id))
- else:
- logger.debug("Using existing flavor '%s' with ID '%s'..."
- % (FLAVOR_NAME, flavor_id))
-
- neutron = ntclient.Client(**nt_creds)
-
- network_dic = openstack_utils.create_network_full(neutron,
- NET_NAME,
- SUBNET_NAME,
- ROUTER_NAME,
- SUBNET_CIDR)
- if not network_dic:
- logger.error("Failed to create the private network...")
- exit(-1)
-
- logger.info("Exporting environment variables...")
- os.environ["NODE_ENV"] = "functest"
- os.environ["OS_TENANT_NAME"] = TENANT_NAME
- os.environ["OS_USERNAME"] = USER_NAME
- os.environ["OS_PASSWORD"] = USER_PWD
- os.environ["OS_TEST_IMAGE"] = image_id
- os.environ["OS_TEST_FLAVOR"] = flavor_id
- os.environ["OS_TEST_NETWORK"] = network_dic["net_id"]
-
- os.chdir(PROMISE_REPO)
- results_file_name = RESULTS_DIR + '/' + 'promise-results.json'
- results_file = open(results_file_name, 'w+')
- cmd = 'npm run -s test -- --reporter json'
-
- logger.info("Running command: %s" % cmd)
- ret = subprocess.call(cmd, shell=True, stdout=results_file,
- stderr=subprocess.STDOUT)
- results_file.close()
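- # the reporter wrote a JSON report with a "stats" summary block to
- # stdout, captured verbatim into promise-results.json and parsed below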
-
- if ret == 0:
- logger.info("The test succeeded.")
- # test_status = 'OK'
- else:
- logger.info("The command '%s' failed." % cmd)
- # test_status = "Failed"
-
- # Print output of file
- with open(results_file_name, 'r') as results_file:
- data = results_file.read()
- logger.debug("\n%s" % data)
- json_data = json.loads(data)
-
- suites = json_data["stats"]["suites"]
- tests = json_data["stats"]["tests"]
- passes = json_data["stats"]["passes"]
- pending = json_data["stats"]["pending"]
- failures = json_data["stats"]["failures"]
- start_time_json = json_data["stats"]["start"]
- end_time = json_data["stats"]["end"]
- duration = float(json_data["stats"]["duration"]) / float(1000)
-
- logger.info("\n"
- "****************************************\n"
- " Promise test report\n\n"
- "****************************************\n"
- " Suites: \t%s\n"
- " Tests: \t%s\n"
- " Passes: \t%s\n"
- " Pending: \t%s\n"
- " Failures:\t%s\n"
- " Start: \t%s\n"
- " End: \t%s\n"
- " Duration:\t%s\n"
- "****************************************\n\n"
- % (suites, tests, passes, pending, failures,
- start_time_json, end_time, duration))
-
- if args.report:
- stop_time = time.time()
- json_results = {"timestart": start_time, "duration": duration,
- "tests": int(tests), "failures": int(failures)}
- logger.debug("Promise Results json: " + str(json_results))
-
- # criteria for Promise in Release B was 100% of tests OK
- status = "FAIL"
- if int(tests) > 32 and int(failures) < 1:
- status = "PASS"
- exit_code = 0
-
- ft_utils.push_results_to_db("promise",
- "promise",
- start_time,
- stop_time,
- status,
- json_results)
-
- exit(exit_code)
-
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/features/sfc/SSHUtils.py b/testcases/features/sfc/SSHUtils.py
deleted file mode 100644
index 9c8c2c727..000000000
--- a/testcases/features/sfc/SSHUtils.py
+++ /dev/null
@@ -1,120 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# Authors: George Paraskevopoulos (geopar@intracom-telecom.com)
-# Jose Lausuch (jose.lausuch@ericsson.com)
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-import paramiko
-import functest.utils.functest_logger as rl
-import os
-
-logger = rl.Logger('SSHUtils').getLogger()
-
-
-def get_ssh_client(hostname, username, password=None, proxy=None):
- client = None
- try:
- if proxy is None:
- client = paramiko.SSHClient()
- else:
- client = ProxyHopClient()
- client.configure_jump_host(proxy['ip'],
- proxy['username'],
- proxy['password'])
-
- if client is None:
- raise Exception('Could not connect to client')
-
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- client.connect(hostname,
- username=username,
- password=password)
- return client
- except Exception, e:
- logger.error(e)
- return None
-
-
-def get_file(ssh_conn, src, dest):
- try:
- sftp = ssh_conn.open_sftp()
- sftp.get(src, dest)
- return True
- except Exception, e:
- logger.error("Error [get_file(ssh_conn, '%s', '%s']: %s" %
- (src, dest, e))
- return None
-
-
-def put_file(ssh_conn, src, dest):
- try:
- sftp = ssh_conn.open_sftp()
- sftp.put(src, dest)
- return True
- except Exception, e:
- logger.error("Error [put_file(ssh_conn, '%s', '%s']: %s" %
- (src, dest, e))
- return None
-
-
-class ProxyHopClient(paramiko.SSHClient):
- '''
- Connect to a remote server using a proxy hop
- '''
- def __init__(self, *args, **kwargs):
- self.logger = rl.Logger("ProxyHopClient").getLogger()
- self.proxy_ssh = None
- self.proxy_transport = None
- self.proxy_channel = None
- self.proxy_ip = None
- self.proxy_ssh_key = None
- self.local_ssh_key = os.path.join(os.getcwd(), 'id_rsa')
- super(ProxyHopClient, self).__init__(*args, **kwargs)
-
- def configure_jump_host(self, jh_ip, jh_user, jh_pass,
- jh_ssh_key='/root/.ssh/id_rsa'):
- self.proxy_ip = jh_ip
- self.proxy_ssh_key = jh_ssh_key
- self.proxy_ssh = paramiko.SSHClient()
- self.proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- self.proxy_ssh.connect(jh_ip,
- username=jh_user,
- password=jh_pass)
- self.proxy_transport = self.proxy_ssh.get_transport()
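- # this transport is reused in connect() to open a direct-tcpip
- # channel towards the final destination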
-
- def connect(self, hostname, port=22, username='root', password=None,
- pkey=None, key_filename=None, timeout=None, allow_agent=True,
- look_for_keys=True, compress=False, sock=None, gss_auth=False,
- gss_kex=False, gss_deleg_creds=True, gss_host=None,
- banner_timeout=None):
- try:
- if self.proxy_ssh is None:
- raise Exception('You must configure the jump '
- 'host before calling connect')
-
- get_file_res = get_file(self.proxy_ssh,
- self.proxy_ssh_key,
- self.local_ssh_key)
- if get_file_res is None:
- raise Exception('Couldn\'t fetch SSH key from jump host')
- proxy_key = (paramiko.RSAKey
- .from_private_key_file(self.local_ssh_key))
-
- self.proxy_channel = self.proxy_transport.open_channel(
- "direct-tcpip",
- (hostname, 22),
- (self.proxy_ip, 22))
-
- self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- super(ProxyHopClient, self).connect(hostname,
- username=username,
- pkey=proxy_key,
- sock=self.proxy_channel)
- os.remove(self.local_ssh_key)
- except Exception, e:
- self.logger.error(e)
diff --git a/testcases/features/sfc/compute_presetup_CI.bash b/testcases/features/sfc/compute_presetup_CI.bash
deleted file mode 100755
index 36148aa15..000000000
--- a/testcases/features/sfc/compute_presetup_CI.bash
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# This script must be used with vxlan-gpe + nsh. Once we have eth + nsh support
-# in ODL, we will not need it anymore
-
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-#ip=`sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep compute|\
-#awk '{print $10}' | head -1`
-
-ip=$1
-echo $ip
-#sshpass -p r00tme scp $ssh_options correct_classifier.bash ${INSTALLER_IP}:/root
-#sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp correct_classifier.bash '"$ip"':/root'
-
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ifconfig br-int up'
-output=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route | \
-cut -d" " -f1 | grep 11.0.0.0' ; exit 0)
-
-if [ -z "$output" ]; then
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route add 11.0.0.0/24 \
-dev br-int'
-fi
diff --git a/testcases/features/sfc/correct_classifier.bash b/testcases/features/sfc/correct_classifier.bash
deleted file mode 100755
index fb08af5c1..000000000
--- a/testcases/features/sfc/correct_classifier.bash
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-# This script corrects the current ODL bug which does not detect
-# when the SFF and the classifier are on the same switch
-
-nsp=`ovs-ofctl -O Openflow13 dump-flows br-int table=11 | \
-grep "NXM_NX_NSP" | head -1 | cut -d',' -f13 | cut -d':' -f2 \
-| cut -d'-' -f1`
-
-ip=`ovs-ofctl -O Openflow13 dump-flows br-int table=11 | \
-grep NXM_NX_NSH_C1 | head -1 | cut -d':' -f5 | cut -d'-' -f1`
-
-output_port=`ovs-ofctl -O Openflow13 show br-int | \
-grep vxgpe | cut -d'(' -f1`
-
-output_port2=`echo $output_port`
-
-echo "This is the nsp =$(($nsp))"
-echo "This is the ip=$ip"
-echo "This is the vxlan-gpe port=$output_port2"
-
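-# re-install the table=11 classifier flows: push the NSH header and
-# resubmit to the local vxlan-gpe port so the co-located SFF forwards
-# the traffic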
-ovs-ofctl -O Openflow13 del-flows br-int "table=11,tcp,reg0=0x1,tp_dst=80"
-ovs-ofctl -O Openflow13 del-flows br-int "table=11,tcp,reg0=0x1,tp_dst=22"
-
-ovs-ofctl -O Openflow13 add-flow br-int "table=11,tcp,reg0=0x1,tp_dst=80 \
-actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_NSH_C2[],push_nsh,\
-load:0x1->NXM_NX_NSH_MDTYPE[],load:0x3->NXM_NX_NSH_NP[],\
-load:$ip->NXM_NX_NSH_C1[],load:$nsp->NXM_NX_NSP[0..23],\
-load:0xff->NXM_NX_NSI[],load:$ip->NXM_NX_TUN_IPV4_DST[],\
-load:$nsp->NXM_NX_TUN_ID[0..31],resubmit($output_port,0)"
-
-ovs-ofctl -O Openflow13 add-flow br-int "table=11,tcp,reg0=0x1,tp_dst=22\
- actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_NSH_C2[],push_nsh,\
-load:0x1->NXM_NX_NSH_MDTYPE[],load:0x3->NXM_NX_NSH_NP[],\
-load:$ip->NXM_NX_NSH_C1[],load:$nsp->NXM_NX_NSP[0..23],\
-load:0xff->NXM_NX_NSI[],load:$ip->NXM_NX_TUN_IPV4_DST[],\
-load:$nsp->NXM_NX_TUN_ID[0..31],resubmit($output_port,0)"
diff --git a/testcases/features/sfc/delete.sh b/testcases/features/sfc/delete.sh
deleted file mode 100755
index c04ae6375..000000000
--- a/testcases/features/sfc/delete.sh
+++ /dev/null
@@ -1,15 +0,0 @@
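-# tear down everything created by the SFC test: classifiers, chains,
-# VNFs/VNFDs, the Heat stacks and the client/server VMs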
-tacker sfc-classifier-delete red_http
-tacker sfc-classifier-delete blue_ssh
-tacker sfc-classifier-delete red_ssh
-tacker sfc-classifier-delete blue_http
-tacker sfc-delete red
-tacker sfc-delete blue
-tacker vnf-delete testVNF1
-tacker vnf-delete testVNF2
-tacker vnfd-delete test-vnfd1
-tacker vnfd-delete test-vnfd2
-openstack stack delete sfc --y
-openstack stack delete sfc_test1 --y
-openstack stack delete sfc_test2 --y
-nova delete client
-nova delete server
diff --git a/testcases/features/sfc/ovs_utils.py b/testcases/features/sfc/ovs_utils.py
deleted file mode 100644
index 20ab5a7e3..000000000
--- a/testcases/features/sfc/ovs_utils.py
+++ /dev/null
@@ -1,117 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import functest.utils.functest_logger as rl
-import os
-import time
-import shutil
-
-logger = rl.Logger('ovs_utils').getLogger()
-
-
-class OVSLogger(object):
- def __init__(self, basedir, ft_resdir):
- self.ovs_dir = basedir
- self.ft_resdir = ft_resdir
- self.__mkdir_p(self.ovs_dir)
-
- def __mkdir_p(self, dirpath):
- if not os.path.exists(dirpath):
- os.makedirs(dirpath)
-
- def __ssh_host(self, ssh_conn, host_prefix='10.20.0'):
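- # 'hostname -I' lists every address of the node; the one matching
- # host_prefix (the Fuel admin network by default) identifies it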
- try:
- _, stdout, _ = ssh_conn.exec_command('hostname -I')
- hosts = stdout.readline().strip().split(' ')
- found_host = [h for h in hosts if h.startswith(host_prefix)][0]
- return found_host
- except Exception, e:
- logger.error(e)
-
- def __dump_to_file(self, operation, host, text, timestamp=None):
- ts = (timestamp if timestamp is not None
- else time.strftime("%Y%m%d-%H%M%S"))
- dumpdir = os.path.join(self.ovs_dir, ts)
- self.__mkdir_p(dumpdir)
- fname = '{0}_{1}'.format(operation, host)
- with open(os.path.join(dumpdir, fname), 'w') as f:
- f.write(text)
-
- def __remote_cmd(self, ssh_conn, cmd):
- try:
- _, stdout, stderr = ssh_conn.exec_command(cmd)
- errors = stderr.readlines()
- if len(errors) > 0:
- host = self.__ssh_host(ssh_conn)
- logger.error(''.join(errors))
- raise Exception('Could not execute {0} in {1}'
- .format(cmd, host))
- output = ''.join(stdout.readlines())
- return output
- except Exception, e:
- logger.error('[__remote_command(ssh_client, {0})]: {1}'
- .format(cmd, e))
- return None
-
- def create_artifact_archive(self):
- shutil.make_archive(self.ovs_dir,
- 'zip',
- root_dir=os.path.dirname(self.ovs_dir),
- base_dir=self.ovs_dir)
- shutil.copy2('{0}.zip'.format(self.ovs_dir), self.ft_resdir)
-
- def ofctl_dump_flows(self, ssh_conn, br='br-int',
- choose_table=None, timestamp=None):
- try:
- cmd = 'ovs-ofctl -OOpenFlow13 dump-flows {0}'.format(br)
- if choose_table is not None:
- cmd = '{0} table={1}'.format(cmd, choose_table)
- output = self.__remote_cmd(ssh_conn, cmd)
- operation = 'ofctl_dump_flows'
- host = self.__ssh_host(ssh_conn)
- self.__dump_to_file(operation, host, output, timestamp=timestamp)
- return output
- except Exception, e:
- logger.error('[ofctl_dump_flows(ssh_client, {0}, {1})]: {2}'
- .format(br, choose_table, e))
- return None
-
- def vsctl_show(self, ssh_conn, timestamp=None):
- try:
- cmd = 'ovs-vsctl show'
- output = self.__remote_cmd(ssh_conn, cmd)
- operation = 'vsctl_show'
- host = self.__ssh_host(ssh_conn)
- self.__dump_to_file(operation, host, output, timestamp=timestamp)
- return output
- except Exception, e:
- logger.error('[vsctl_show(ssh_client)]: {0}'.format(e))
- return None
-
- def dump_ovs_logs(self, controller_clients, compute_clients,
- related_error=None, timestamp=None):
- if timestamp is None:
- timestamp = time.strftime("%Y%m%d-%H%M%S")
-
- for controller_client in controller_clients:
- self.ofctl_dump_flows(controller_client,
- timestamp=timestamp)
- self.vsctl_show(controller_client,
- timestamp=timestamp)
-
- for compute_client in compute_clients:
- self.ofctl_dump_flows(compute_client,
- timestamp=timestamp)
- self.vsctl_show(compute_client,
- timestamp=timestamp)
-
- if related_error is not None:
- dumpdir = os.path.join(self.ovs_dir, timestamp)
- with open(os.path.join(dumpdir, 'error'), 'w') as f:
- f.write(related_error)
diff --git a/testcases/features/sfc/prepare_odl_sfc.bash b/testcases/features/sfc/prepare_odl_sfc.bash
deleted file mode 100755
index 80ed9bd92..000000000
--- a/testcases/features/sfc/prepare_odl_sfc.bash
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-#
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-# Manuel Buil (manuel.buil@ericsson.com)
-# Prepares the controller and the compute nodes for the odl-sfc testcase
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-ODL_SFC_LOG=/home/opnfv/functest/results/odl-sfc.log
-ODL_SFC_DIR=${FUNCTEST_REPO_DIR}/testcases/features/sfc
-
-# Append all output (stdout and stderr) to the log file; tee's console copy is discarded
-bash ${ODL_SFC_DIR}/server_presetup_CI.bash |& \
- tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1
-
-# Get return value from PIPESTATUS array (bash specific feature)
-ret_val=${PIPESTATUS[0]}
-if [ $ret_val != 0 ]; then
- echo "The tacker server deployment failed"
- exit $ret_val
-fi
-echo "The tacker server was deployed successfully"
-
-bash ${ODL_SFC_DIR}/compute_presetup_CI.bash |& \
- tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1
-
-ret_val=${PIPESTATUS[0]}
-if [ $ret_val != 0 ]; then
- exit $ret_val
-fi
-
-exit 0
diff --git a/testcases/features/sfc/prepare_odl_sfc.py b/testcases/features/sfc/prepare_odl_sfc.py
deleted file mode 100755
index 78f4d7646..000000000
--- a/testcases/features/sfc/prepare_odl_sfc.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-# Manuel Buil (manuel.buil@ericsson.com)
-# Prepares the controller and the compute nodes for the odl-sfc testcase
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import os
-import sys
-import subprocess
-import paramiko
-import functest.utils.functest_logger as ft_logger
-
-logger = ft_logger.Logger("ODL_SFC").getLogger()
-
-try:
- FUNCTEST_REPO_DIR = os.environ['FUNCTEST_REPO_DIR']
-except:
- logger.debug("FUNCTEST_REPO_DIR is not set, using the default path")
- FUNCTEST_REPO_DIR = "/home/opnfv/repos/functest"
-
-try:
- INSTALLER_IP = os.environ['INSTALLER_IP']
-except:
- logger.debug("INSTALLER_IP is not set, falling back to 10.20.0.2")
- INSTALLER_IP = "10.20.0.2"
-
-os.environ['ODL_SFC_LOG'] = "/home/opnfv/functest/results/odl-sfc.log"
-os.environ['ODL_SFC_DIR'] = FUNCTEST_REPO_DIR + "/testcases/features/sfc"
-
-command = os.environ['ODL_SFC_DIR'] + ("/server_presetup_CI.bash | "
- "tee -a ${ODL_SFC_LOG} "
- "1>/dev/null 2>&1")
-
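-# ${ODL_SFC_LOG} is expanded by the shell, which inherits the
-# os.environ assignments made above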
-output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# i = line.rstrip()
-# print(i)
-
-# Make sure the process is finished before checking the returncode
-if not output.poll():
- output.wait()
-
-# Get return value
-if output.returncode:
- print("The presetup of the server did not work")
- sys.exit(output.returncode)
-
-logger.info("The presetup of the server worked ")
-
-ssh_options = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-ssh = paramiko.SSHClient()
-ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-try:
- ssh.connect(INSTALLER_IP, username="root",
- password="r00tme", timeout=2)
- command = "fuel node | grep compute | awk '{print $10}'"
- logger.info("Executing ssh to collect the compute IPs")
- (stdin, stdout, stderr) = ssh.exec_command(command)
-except:
- logger.error("Something went wrong in the ssh to collect the compute IPs")
- sys.exit(1)
-
-output = stdout.readlines()
-for ip in output:
- command = os.environ['ODL_SFC_DIR'] + ("/compute_presetup_CI.bash "
- "" + ip.rstrip() + "| tee -a "
- "${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
- output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# print(line)
-# sys.stdout.flush()
-
- output.stdout.close()
-
- if not (output.poll()):
- output.wait()
-
- # Get return value
- if output.returncode:
- print("The compute config did not work on compute %s" % ip)
- sys.exit(output.returncode)
-
-sys.exit(0)
diff --git a/testcases/features/sfc/server_presetup_CI.bash b/testcases/features/sfc/server_presetup_CI.bash
deleted file mode 100755
index 240353f5b..000000000
--- a/testcases/features/sfc/server_presetup_CI.bash
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
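-# grab the first controller's admin-network IP from the 'fuel node' table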
-ip=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep controller|awk '{print $10}' | head -1)
-echo $ip
-
-sshpass -p r00tme scp $ssh_options delete.sh ${INSTALLER_IP}:/root
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp '"$ip"':/root/tackerc .'
-sshpass -p r00tme scp $ssh_options ${INSTALLER_IP}:/root/tackerc $BASEDIR
diff --git a/testcases/features/sfc/sfc.py b/testcases/features/sfc/sfc.py
deleted file mode 100755
index 2cc7ac9c0..000000000
--- a/testcases/features/sfc/sfc.py
+++ /dev/null
@@ -1,545 +0,0 @@
-import argparse
-import os
-import subprocess
-import sys
-import time
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-import re
-import json
-import SSHUtils as ssh_utils
-import ovs_utils
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("ODL_SFC").getLogger()
-
-FUNCTEST_RESULTS_DIR = '/home/opnfv/functest/results/odl-sfc'
-FUNCTEST_REPO = ft_utils.FUNCTEST_REPO
-REPO_PATH = os.environ['repos_dir'] + '/functest/'
-HOME = os.environ['HOME'] + "/"
-CLIENT = "client"
-SERVER = "server"
-FLAVOR = "custom"
-IMAGE_NAME = "sf_nsh_colorado"
-IMAGE_FILENAME = "sf_nsh_colorado.qcow2"
-IMAGE_FORMAT = "qcow2"
-IMAGE_DIR = "/home/opnfv/functest/data"
-IMAGE_PATH = IMAGE_DIR + "/" + IMAGE_FILENAME
-IMAGE_URL = "http://artifacts.opnfv.org/sfc/demo/" + IMAGE_FILENAME
-
-# NEUTRON Private Network parameters
-NET_NAME = "example-net"
-SUBNET_NAME = "example-subnet"
-SUBNET_CIDR = "11.0.0.0/24"
-ROUTER_NAME = "example-router"
-SECGROUP_NAME = "example-sg"
-SECGROUP_DESCR = "Example Security group"
-SFC_TEST_DIR = REPO_PATH + "/testcases/features/sfc/"
-TACKER_SCRIPT = SFC_TEST_DIR + "sfc_tacker.bash"
-TACKER_CHANGECLASSI = SFC_TEST_DIR + "sfc_change_classi.bash"
-ssh_options = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-json_results = {"tests": 4, "failures": 0}
-
-PROXY = {
- 'ip': '10.20.0.2',
- 'username': 'root',
- 'password': 'r00tme'
-}
-
-# run the given command locally and return its output on success
-
-
-def run_cmd(cmd, wdir=None, ignore_stderr=False, ignore_no_output=True):
- pipe = subprocess.Popen(cmd, shell=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, cwd=wdir)
-
- (output, errors) = pipe.communicate()
- if output:
- output = output.strip()
- if pipe.returncode < 0:
- logger.error(errors)
- return False
- if errors:
- logger.error(errors)
- if ignore_stderr:
- return True
- else:
- return False
-
- if ignore_no_output:
- if not output:
- return True
-
- return output
-
-# run the given command on the first OpenStack controller
-
-
-def run_cmd_on_cntlr(cmd):
- ip_cntlrs = get_openstack_node_ips("controller")
- if not ip_cntlrs:
- return None
-
- ssh_cmd = "ssh %s %s %s" % (ssh_options, ip_cntlrs[0], cmd)
- return run_cmd_on_fm(ssh_cmd)
-
-# run the given command on the first OpenStack compute node
-
-
-def run_cmd_on_compute(cmd):
- ip_computes = get_openstack_node_ips("compute")
- if not ip_computes:
- return None
-
- ssh_cmd = "ssh %s %s %s" % (ssh_options, ip_computes[0], cmd)
- return run_cmd_on_fm(ssh_cmd)
-
-# run the given command on the Fuel master
-
-
-def run_cmd_on_fm(cmd, username="root", passwd="r00tme"):
- ip = os.environ.get("INSTALLER_IP")
- ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
- passwd, ssh_options, username, ip, cmd)
- return run_cmd(ssh_cmd)
-
-# run the given command on a remote machine (can be a VM)
-
-
-def run_cmd_remote(ip, cmd, username="root", passwd="opnfv"):
- ssh_opt_append = "%s -o ConnectTimeout=50 " % ssh_options
- ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
- passwd, ssh_opt_append, username, ip, cmd)
- return run_cmd(ssh_cmd)
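-# e.g. run_cmd_remote(ip, "exit") is used below in check_ssh() to
-# probe SSH reachability of the SFs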
-
-# get the IP addresses of the OpenStack nodes with the given role
-
-
-def get_openstack_node_ips(role):
- fuel_env = os.environ.get("FUEL_ENV")
- if fuel_env is not None:
- cmd = "fuel2 node list -f json -e %s" % fuel_env
- else:
- cmd = "fuel2 node list -f json"
-
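- # 'fuel2 node list -f json' prints a JSON array of node objects that
- # carry (at least) 'roles' and 'ip' fields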
- nodes = run_cmd_on_fm(cmd)
- ips = []
- nodes = json.loads(nodes)
- for node in nodes:
- if role in node["roles"]:
- ips.append(node["ip"])
-
- return ips
-
-# configure iptables on the OpenStack controller to accept incoming connections
-
-
-def configure_iptables():
- iptable_cmds = ["iptables -P INPUT ACCEPT",
- "iptables -t nat -P INPUT ACCEPT",
- "iptables -A INPUT -m state \
- --state NEW,ESTABLISHED,RELATED -j ACCEPT"]
-
- for cmd in iptable_cmds:
- logger.info("Configuring %s on contoller" % cmd)
- run_cmd_on_cntlr(cmd)
-
- return
-
-
-def download_image():
- if not os.path.isfile(IMAGE_PATH):
- logger.info("Downloading image")
- ft_utils.download_url(IMAGE_URL, IMAGE_DIR)
- else:
- logger.info("Using existing image")
- return
-
-
-def setup_glance(glance_client):
- image_id = os_utils.create_glance_image(glance_client,
- IMAGE_NAME,
- IMAGE_PATH,
- disk=IMAGE_FORMAT,
- container="bare",
- public=True)
-
- return image_id
-
-
-def setup_neutron(neutron_client):
- n_dict = os_utils.create_network_full(neutron_client,
- NET_NAME,
- SUBNET_NAME,
- ROUTER_NAME,
- SUBNET_CIDR)
- if not n_dict:
- logger.error("failed to create neutron network")
- sys.exit(-1)
-
- network_id = n_dict["net_id"]
- return network_id
-
-
-def setup_ingress_egress_secgroup(neutron_client, protocol,
- min_port=None, max_port=None):
- secgroups = os_utils.get_security_groups(neutron_client)
- for sg in secgroups:
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'ingress', protocol,
- port_range_min=min_port,
- port_range_max=max_port)
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'egress', protocol,
- port_range_min=min_port,
- port_range_max=max_port)
- return
-
-
-def setup_security_groups(neutron_client):
- sg_id = os_utils.create_security_group_full(neutron_client,
- SECGROUP_NAME, SECGROUP_DESCR)
- setup_ingress_egress_secgroup(neutron_client, "icmp")
- setup_ingress_egress_secgroup(neutron_client, "udp", 67, 68)
- setup_ingress_egress_secgroup(neutron_client, "tcp", 22, 22)
- setup_ingress_egress_secgroup(neutron_client, "tcp", 80, 80)
- return sg_id
-
-
-def boot_instance(nova_client, name, flavor, image_id, network_id, sg_id):
- logger.info("Creating instance '%s'..." % name)
- logger.debug(
- "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
- "network=%s \n" % (name, flavor, image_id, network_id))
-
- instance = os_utils.create_instance_and_wait_for_active(flavor,
- image_id,
- network_id,
- name)
-
- if instance is None:
- logger.error("Error while booting instance.")
- sys.exit(-1)
-
- instance_ip = instance.networks.get(NET_NAME)[0]
- logger.debug("Instance '%s' got private ip '%s'." %
- (name, instance_ip))
-
- logger.info("Adding '%s' to security group %s" % (name, SECGROUP_NAME))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
-
- return instance_ip
-
-
-def ping(remote, pkt_cnt=1, iface=None, retries=100, timeout=None):
- ping_cmd = 'ping'
-
- if timeout:
- ping_cmd = ping_cmd + ' -w %s' % timeout
-
- grep_cmd = "grep -e 'packet loss' -e rtt"
-
- if iface is not None:
- ping_cmd = ping_cmd + ' -I %s' % iface
-
- ping_cmd = ping_cmd + ' -i 0 -c %d %s' % (pkt_cnt, remote)
- cmd = ping_cmd + '|' + grep_cmd
-
- while retries > 0:
- output = run_cmd(cmd)
- if not output:
- return False
-
- match = re.search(r'(\d*)% packet loss', output)
- if not match:
- return False
-
- packet_loss = int(match.group(1))
- if packet_loss == 0:
- return True
-
- retries = retries - 1
-
- return False
-
-
-def get_floating_ips(nova_client, neutron_client):
- ips = []
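- # every instance gets a floating IP; the SF addresses are collected
- # in ips while the client and server IPs are returned separately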
- instances = nova_client.servers.list(search_opts={'all_tenants': 1})
- for instance in instances:
- floatip_dic = os_utils.create_floating_ip(neutron_client)
- floatip = floatip_dic['fip_addr']
- instance.add_floating_ip(floatip)
- logger.info("Instance name and ip %s:%s " % (instance.name, floatip))
- logger.info("Waiting for instance %s:%s to come up" %
- (instance.name, floatip))
- if not ping(floatip):
- logger.info("Instance %s:%s didn't come up" %
- (instance.name, floatip))
- sys.exit(1)
-
- if instance.name == "server":
- logger.info("Server:%s is reachable" % floatip)
- server_ip = floatip
- elif instance.name == "client":
- logger.info("Client:%s is reachable" % floatip)
- client_ip = floatip
- else:
- logger.info("SF:%s is reachable" % floatip)
- ips.append(floatip)
-
- return server_ip, client_ip, ips[1], ips[0]
-
-# start an HTTP server on a given machine (can be a VM)
-
-
-def start_http_server(ip):
- cmd = "\'python -m SimpleHTTPServer 80"
- cmd = cmd + " > /dev/null 2>&1 &\'"
- return run_cmd_remote(ip, cmd)
-
-# set up a firewall using vxlan_tool.py on a given machine (can be a VM)
-
-
-def vxlan_firewall(sf, iface="eth0", port="22", block=True):
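- # assumes vxlan_tool.py is already present in /root on the SF VM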
- cmd = "python vxlan_tool.py"
- cmd = cmd + " -i " + iface + " -d forward -v off"
- if block:
- cmd = "python vxlan_tool.py -i eth0 -d forward -v off -b " + port
-
- cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
- run_cmd_remote(sf, cmd)
-
-# run netcat on a given machine (can be a VM)
-
-
-def netcat(s_ip, c_ip, port="80", timeout=5):
- cmd = "nc -zv "
- cmd = cmd + " -w %s %s %s" % (timeout, s_ip, port)
- cmd = cmd + " 2>&1"
- output = run_cmd_remote(c_ip, cmd)
- logger.info("%s" % output)
- return output
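-# e.g. netcat(srv_prv_ip, client_ip, port="80") drives the HTTP and
-# SSH blocking checks below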
-
-
-def is_ssh_blocked(srv_prv_ip, client_ip):
- res = netcat(srv_prv_ip, client_ip, port="22")
- match = re.search("nc:.*timed out:.*", res, re.M)
- if match:
- return True
-
- return False
-
-
-def is_http_blocked(srv_prv_ip, client_ip):
- res = netcat(srv_prv_ip, client_ip, port="80")
- match = re.search(".* 80 port.* succeeded!", res, re.M)
- if match:
- return False
-
- return True
-
-
-def capture_err_logs(controller_clients, compute_clients, error):
- ovs_logger = ovs_utils.OVSLogger(
- os.path.join(os.getcwd(), 'ovs-logs'),
- FUNCTEST_RESULTS_DIR)
-
- timestamp = time.strftime("%Y%m%d-%H%M%S")
- ovs_logger.dump_ovs_logs(controller_clients,
- compute_clients,
- related_error=error,
- timestamp=timestamp)
- return
-
-
-def update_json_results(name, result):
- json_results.update({name: result})
- if result != "Passed":
- json_results["failures"] += 1
-
- return
-
-
-def get_ssh_clients(role):
- clients = []
- for ip in get_openstack_node_ips(role):
- s_client = ssh_utils.get_ssh_client(ip,
- 'root',
- proxy=PROXY)
- clients.append(s_client)
-
- return clients
-
-# Check SSH connectivity to VNFs
-
-
-def check_ssh(ips, retries=100):
- check = [False, False]
- logger.info("Checking SSH connectivity to the SFs with ips %s" % str(ips))
- while retries and not all(check):
- for index, ip in enumerate(ips):
- check[index] = run_cmd_remote(ip, "exit")
-
- if all(check):
- logger.info("SSH connectivity to the SFs established")
- return True
-
- time.sleep(3)
- retries -= 1
-
- return False
-
-
-def main():
- installer_type = os.environ.get("INSTALLER_TYPE")
- if installer_type != "fuel":
- logger.error(
- '\033[91mCurrently only the Fuel installer type is supported\033[0m')
- sys.exit(1)
-
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
-
- env_list = run_cmd_on_fm("fuel2 env list -f json")
- fuel_env = os.environ.get("FUEL_ENV")
- if len(json.loads(env_list)) > 1 and fuel_env is None:
- out = run_cmd_on_fm("fuel env")
- logger.error(
- '\033[91mMore than one fuel env found\033[0m\n %s' % out)
- logger.error(
- '\033[91mexport FUEL_ENV=<env-id> to set ENV\033[0m')
- sys.exit(1)
-
- start_time = time.time()
- status = "PASS"
- configure_iptables()
- download_image()
- _, custom_flv_id = os_utils.get_or_create_flavor(
- FLAVOR, 1500, 10, 1, public=True)
- if not custom_flv_id:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- glance_client = os_utils.get_glance_client()
- neutron_client = os_utils.get_neutron_client()
- nova_client = os_utils.get_nova_client()
-
- controller_clients = get_ssh_clients("controller")
- compute_clients = get_ssh_clients("compute")
-
- image_id = setup_glance(glance_client)
- network_id = setup_neutron(neutron_client)
- sg_id = setup_security_groups(neutron_client)
-
- boot_instance(
- nova_client, CLIENT, FLAVOR, image_id, network_id, sg_id)
- srv_prv_ip = boot_instance(
- nova_client, SERVER, FLAVOR, image_id, network_id, sg_id)
-
- subprocess.call(TACKER_SCRIPT, shell=True)
- server_ip, client_ip, sf1, sf2 = get_floating_ips(
- nova_client, neutron_client)
-
- if not check_ssh([sf1, sf2]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_ip)
- if not start_http_server(server_ip):
- logger.error(
- '\033[91mFailed to start HTTP server on %s\033[0m' % server_ip)
- sys.exit(1)
-
- logger.info("Starting HTTP firewall on %s" % sf2)
- vxlan_firewall(sf2, port="80")
- logger.info("Starting SSH firewall on %s" % sf1)
- vxlan_firewall(sf1, port="22")
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- time.sleep(120)
-
- logger.info("Test SSH")
- if is_ssh_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 1 [PASSED] ==> SSH BLOCKED\033[0m')
- update_json_results("Test 1: SSH Blocked", "Passed")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
- logger.error(error)
- capture_err_logs(controller_clients, compute_clients, error)
- update_json_results("Test 1: SSH Blocked", "Failed")
-
- logger.info("Test HTTP")
- if not is_http_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 2 [PASSED] ==> HTTP WORKS\033[0m')
- update_json_results("Test 2: HTTP works", "Passed")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- capture_err_logs(controller_clients, compute_clients, error)
- update_json_results("Test 2: HTTP works", "Failed")
-
- logger.info("Changing the classification")
- subprocess.call(TACKER_CHANGECLASSI, shell=True)
- logger.info("Wait for ODL to update the classification rules in OVS")
- time.sleep(100)
-
- logger.info("Test HTTP")
- if is_http_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 3 [PASSED] ==> HTTP Blocked\033[0m')
- update_json_results("Test 3: HTTP Blocked", "Passed")
- else:
- error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- capture_err_logs(controller_clients, compute_clients, error)
- update_json_results("Test 3: HTTP Blocked", "Failed")
-
- logger.info("Test SSH")
- if not is_ssh_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 4 [PASSED] ==> SSH Works\033[0m')
- update_json_results("Test 4: SSH Works", "Passed")
- else:
- error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
- logger.error(error)
- capture_err_logs(controller_clients, compute_clients, error)
- update_json_results("Test 4: SSH Works", "Failed")
-
- if json_results["failures"]:
- status = "FAIL"
- logger.error('\033[91mSFC TESTS: %s :( FOUND %s FAIL \033[0m' % (
- status, json_results["failures"]))
-
- if args.report:
- stop_time = time.time()
- logger.debug("Promise Results json: " + str(json_results))
- ft_utils.push_results_to_db("sfc",
- "functest-odl-sfc",
- start_time,
- stop_time,
- status,
- json_results)
-
- if status == "PASS":
- logger.info('\033[92mSFC ALL TESTS: %s :)\033[0m' % status)
- sys.exit(0)
-
- sys.exit(1)
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/features/sfc/sfc_change_classi.bash b/testcases/features/sfc/sfc_change_classi.bash
deleted file mode 100755
index 70375ab3b..000000000
--- a/testcases/features/sfc/sfc_change_classi.bash
+++ /dev/null
@@ -1,7 +0,0 @@
-tacker sfc-classifier-delete red_http
-tacker sfc-classifier-delete red_ssh
-
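-# re-attach the HTTP and SSH classifiers to the 'blue' chain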
-tacker sfc-classifier-create --name blue_http --chain blue --match source_port=0,dest_port=80,protocol=6
-tacker sfc-classifier-create --name blue_ssh --chain blue --match source_port=0,dest_port=22,protocol=6
-
-tacker sfc-classifier-list
diff --git a/testcases/features/sfc/sfc_colorado1.py b/testcases/features/sfc/sfc_colorado1.py
deleted file mode 100755
index d31541d13..000000000
--- a/testcases/features/sfc/sfc_colorado1.py
+++ /dev/null
@@ -1,596 +0,0 @@
-import os
-import subprocess
-import sys
-import time
-import argparse
-import paramiko
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-import SSHUtils as ssh_utils
-import ovs_utils
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("ODL_SFC").getLogger()
-
-FUNCTEST_RESULTS_DIR = '/home/opnfv/functest/results/odl-sfc'
-FUNCTEST_REPO = ft_utils.FUNCTEST_REPO
-
-HOME = os.environ['HOME'] + "/"
-
-VM_BOOT_TIMEOUT = 180
-INSTANCE_NAME = "client"
-FLAVOR = "custom"
-IMAGE_NAME = "sf_nsh_colorado"
-IMAGE_FILENAME = "sf_nsh_colorado.qcow2"
-IMAGE_FORMAT = "qcow2"
-IMAGE_PATH = "/home/opnfv/functest/data" + "/" + IMAGE_FILENAME
-
-# NEUTRON Private Network parameters
-
-NET_NAME = "example-net"
-SUBNET_NAME = "example-subnet"
-SUBNET_CIDR = "11.0.0.0/24"
-ROUTER_NAME = "example-router"
-
-SECGROUP_NAME = "example-sg"
-SECGROUP_DESCR = "Example Security group"
-
-INSTANCE_NAME_2 = "server"
-
-# TEST_DB = ft_utils.get_parameter_from_yaml("results.test_db_url")
-
-PRE_SETUP_SCRIPT = 'sfc_pre_setup.bash'
-TACKER_SCRIPT = 'sfc_tacker.bash'
-TEARDOWN_SCRIPT = "sfc_teardown.bash"
-TACKER_CHANGECLASSI = "sfc_change_classi.bash"
-
-ssh_options = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-
-PROXY = {
- 'ip': '10.20.0.2',
- 'username': 'root',
- 'password': 'r00tme'
-}
-
-
-def check_ssh(ip):
- cmd = "sshpass -p opnfv ssh " + ssh_options + " -q " + ip + " exit"
- success = subprocess.call(cmd, shell=True) == 0
- if not success:
- logger.debug("Wating for SSH connectivity in SF with IP: %s" % ip)
- return success
-
-
-def main():
-
- # Allow any port so that tacker commands reach the server.
- # This will be removed once tacker is included in the OPNFV installation
-
- status = "PASS"
- failures = 0
- start_time = time.time()
- json_results = {}
-
- contr_cmd = ("sshpass -p r00tme ssh " + ssh_options + " root@10.20.0.2"
- " 'fuel node'|grep controller|awk '{print $10}'")
- logger.info("Executing script to get ip_server: '%s'" % contr_cmd)
- process = subprocess.Popen(contr_cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- ip_server = process.stdout.readline().rstrip()
-
- comp_cmd = ("sshpass -p r00tme ssh " + ssh_options + " root@10.20.0.2"
- " 'fuel node'|grep compute|awk '{print $10}'")
- logger.info("Executing script to get compute IPs: '%s'" % comp_cmd)
- process = subprocess.Popen(comp_cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- ip_computes = [ip.strip() for ip in process.stdout.readlines()]
-
- iptable_cmd1 = ("sshpass -p r00tme ssh " + ssh_options + " root@10.20.0.2"
- " ssh " + ip_server + " iptables -P INPUT ACCEPT ")
- iptable_cmd2 = ("sshpass -p r00tme ssh " + ssh_options + " root@10.20.0.2"
- " ssh " + ip_server + " iptables -t nat -P INPUT ACCEPT ")
- iptable_cmd3 = ("sshpass -p r00tme ssh " + ssh_options + " root@10.20.0.2"
- " ssh " + ssh_options + " " + ip_server +
- " iptables -A INPUT -m state"
- " --state NEW,ESTABLISHED,RELATED -j ACCEPT")
-
- logger.info("Changing firewall policy in controller: '%s'" % iptable_cmd1)
- subprocess.call(iptable_cmd1, shell=True, stderr=subprocess.PIPE)
-
- logger.info("Changing firewall policy in controller: '%s'" % iptable_cmd2)
- subprocess.call(iptable_cmd2, shell=True, stderr=subprocess.PIPE)
-
- logger.info("Changing firewall policy in controller: '%s'" % iptable_cmd3)
- subprocess.call(iptable_cmd3, shell=True, stderr=subprocess.PIPE)
-
-# Getting the different clients
-
- nova_client = os_utils.get_nova_client()
- neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
-
- ovs_logger = ovs_utils.OVSLogger(
- os.path.join(os.getcwd(), 'ovs-logs'),
- FUNCTEST_RESULTS_DIR)
-
- controller_clients = [ssh_utils.get_ssh_client(ip_server,
- 'root',
- proxy=PROXY)]
- compute_clients = []
- for c_ip in ip_computes:
- c_client = ssh_utils.get_ssh_client(c_ip,
- 'root',
- proxy=PROXY)
- compute_clients.append(c_client)
-
-# Download the image
-
- if not os.path.isfile(IMAGE_PATH):
- logger.info("Downloading image")
- ft_utils.download_url(
- "http://artifacts.opnfv.org/sfc/demo/sf_nsh_colorado.qcow2",
- "/home/opnfv/functest/data/")
- else:
- logger.info("Using old image")
-
-# Create glance image and the neutron network
-
- image_id = os_utils.create_glance_image(glance_client,
- IMAGE_NAME,
- IMAGE_PATH,
- disk=IMAGE_FORMAT,
- container="bare",
- public=True)
-
- network_dic = os_utils.create_network_full(neutron_client,
- NET_NAME,
- SUBNET_NAME,
- ROUTER_NAME,
- SUBNET_CIDR)
- if not network_dic:
- logger.error(
- "There has been a problem when creating the neutron network")
- sys.exit(-1)
-
- network_id = network_dic["net_id"]
-
- sg_id = os_utils.create_security_group_full(neutron_client,
- SECGROUP_NAME, SECGROUP_DESCR)
-
- secgroups = os_utils.get_security_groups(neutron_client)
-
- for sg in secgroups:
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'ingress', 'udp',
- port_range_min=67,
- port_range_max=68)
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'egress', 'udp',
- port_range_min=67,
- port_range_max=68)
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'ingress', 'tcp',
- port_range_min=22,
- port_range_max=22)
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'egress', 'tcp',
- port_range_min=22,
- port_range_max=22)
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'ingress', 'tcp',
- port_range_min=80,
- port_range_max=80)
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'egress', 'tcp',
- port_range_min=80,
- port_range_max=80)
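-    # The rules above open DHCP (udp 67-68), SSH (tcp 22) and HTTP
-    # (tcp 80) in both directions on every security group, so the
-    # client, the server and the SFs are all reachable during the test.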
-
- _, custom_flv_id = os_utils.get_or_create_flavor(
- 'custom', 1500, 10, 1, public=True)
- if not custom_flv_id:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- iterator = 0
- while(iterator < 6):
- # boot INSTANCE
- logger.info("Creating instance '%s'..." % INSTANCE_NAME)
- logger.debug(
- "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
- "network=%s \n" % (INSTANCE_NAME, FLAVOR, image_id, network_id))
- instance = os_utils.create_instance_and_wait_for_active(
- FLAVOR,
- image_id,
- network_id,
- INSTANCE_NAME,
- av_zone='nova')
-
- if instance is None:
- logger.error("Error while booting instance.")
- iterator += 1
- continue
- # Retrieve IP of INSTANCE
- instance_ip = instance.networks.get(NET_NAME)[0]
- logger.debug("Instance '%s' got private ip '%s'." %
- (INSTANCE_NAME, instance_ip))
-
- logger.info("Adding '%s' to security group '%s'..."
- % (INSTANCE_NAME, SECGROUP_NAME))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
-
- logger.info("Creating floating IP for VM '%s'..." % INSTANCE_NAME)
- floatip_dic = os_utils.create_floating_ip(neutron_client)
- floatip_client = floatip_dic['fip_addr']
- # floatip_id = floatip_dic['fip_id']
-
- if floatip_client is None:
- logger.error("Cannot create floating IP.")
- iterator += 1
- continue
- logger.info("Floating IP created: '%s'" % floatip_client)
-
- logger.info("Associating floating ip: '%s' to VM '%s' "
- % (floatip_client, INSTANCE_NAME))
- if not os_utils.add_floating_ip(nova_client,
- instance.id,
- floatip_client):
- logger.error("Cannot associate floating IP to VM.")
- iterator += 1
- continue
-
- # STARTING SECOND VM (server) ###
-
-        # boot INSTANCE
- logger.info("Creating instance '%s'..." % INSTANCE_NAME_2)
- logger.debug(
- "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
- "network=%s \n" % (INSTANCE_NAME_2, FLAVOR, image_id, network_id))
- instance_2 = os_utils.create_instance_and_wait_for_active(
- FLAVOR,
- image_id,
- network_id,
- INSTANCE_NAME_2,
- av_zone='nova')
-
- if instance_2 is None:
- logger.error("Error while booting instance.")
- iterator += 1
- continue
- # Retrieve IP of INSTANCE
- instance_ip_2 = instance_2.networks.get(NET_NAME)[0]
- logger.debug("Instance '%s' got private ip '%s'." %
- (INSTANCE_NAME_2, instance_ip_2))
-
- logger.info("Adding '%s' to security group '%s'..."
- % (INSTANCE_NAME_2, SECGROUP_NAME))
- os_utils.add_secgroup_to_instance(nova_client, instance_2.id, sg_id)
-
- logger.info("Creating floating IP for VM '%s'..." % INSTANCE_NAME_2)
- floatip_dic = os_utils.create_floating_ip(neutron_client)
- floatip_server = floatip_dic['fip_addr']
- # floatip_id = floatip_dic['fip_id']
-
- if floatip_server is None:
- logger.error("Cannot create floating IP.")
- iterator += 1
- continue
- logger.info("Floating IP created: '%s'" % floatip_server)
-
- logger.info("Associating floating ip: '%s' to VM '%s' "
- % (floatip_server, INSTANCE_NAME_2))
-
- if not os_utils.add_floating_ip(nova_client,
- instance_2.id,
- floatip_server):
- logger.error("Cannot associate floating IP to VM.")
- iterator += 1
- continue
-
- # CREATION OF THE 2 SF ####
-
- tacker_script = "%s/testcases/features/sfc/%s" % \
- (FUNCTEST_REPO, TACKER_SCRIPT)
- logger.info("Executing tacker script: '%s'" % tacker_script)
- subprocess.call(tacker_script, shell=True)
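-        # sfc_tacker.bash (see its diff below) registers the two VNF
-        # descriptors, boots testVNF1/testVNF2 and creates the
-        # 'red'/'blue' chains plus the HTTP and SSH classifiers.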
-
- # SSH CALL TO START HTTP SERVER
- ssh = paramiko.SSHClient()
- ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
- try:
- ssh.connect(floatip_server, username="root",
- password="opnfv", timeout=2)
- command = "python -m SimpleHTTPServer 80 > /dev/null 2>&1 &"
- logger.info("Starting HTTP server")
- (stdin, stdout, stderr) = ssh.exec_command(command)
- except:
- logger.debug("Waiting for %s..." % floatip_server)
- time.sleep(6)
- # timeout -= 1
-
- instances = nova_client.servers.list(search_opts={'all_tenants': 1})
- ips = []
- try:
- for instance in instances:
- if "server" not in instance.name:
- if "client" not in instance.name:
- logger.debug(
- "This is the instance name: %s " % instance.name)
- floatip_dic = os_utils.create_floating_ip(
- neutron_client)
- floatip = floatip_dic['fip_addr']
- ips.append(floatip)
- instance.add_floating_ip(floatip)
- except:
- logger.debug("Problems assigning floating IP to SFs")
-
-        # If no IPs were obtained, we can't continue
-        if not ips:
-            logger.error("Failed to obtain IPs, can't continue, exiting")
- return
-
- logger.debug("Floating IPs for SFs: %s..." % ips)
-
- # Check SSH connectivity to VNFs
- r = 0
- retries = 100
- check = [False, False]
-
- logger.info("Checking SSH connectivity to the SFs with ips {0}"
- .format(str(ips)))
- while r < retries and not all(check):
- try:
- check = [check_ssh(ips[0]), check_ssh(ips[1])]
- except Exception:
- logger.exception("SSH check failed")
- check = [False, False]
- time.sleep(3)
- r += 1
-
- if not all(check):
- logger.error("Cannot establish SSH connection to the SFs")
- iterator += 1
- continue
-
- logger.info("SSH connectivity to the SFs established")
-
- # SSH TO START THE VXLAN_TOOL ON SF1
- logger.info("Configuring the SFs")
- try:
- ssh.connect(ips[0], username="root",
- password="opnfv", timeout=2)
- command = ("nohup python vxlan_tool.py -i eth0 "
- "-d forward -v off -b 80 > /dev/null 2>&1 &")
- (stdin, stdout, stderr) = ssh.exec_command(command)
- except:
- logger.debug("Waiting for %s..." % ips[0])
- time.sleep(6)
- # timeout -= 1
-
- try:
- n = 0
- while 1:
- (stdin, stdout, stderr) = ssh.exec_command(
- "ps aux | grep \"vxlan_tool.py\" | grep -v grep")
- if len(stdout.readlines()) > 0:
- logger.debug("HTTP firewall started")
- break
- else:
- n += 1
- if (n > 7):
- break
- logger.debug("HTTP firewall not started")
- time.sleep(3)
- except Exception:
- logger.exception("vxlan_tool not started in SF1")
-
- # SSH TO START THE VXLAN_TOOL ON SF2
- try:
- ssh.connect(ips[1], username="root",
- password="opnfv", timeout=2)
- command = ("nohup python vxlan_tool.py -i eth0 "
- "-d forward -v off -b 22 > /dev/null 2>&1 &")
- (stdin, stdout, stderr) = ssh.exec_command(command)
- except:
- logger.debug("Waiting for %s..." % ips[1])
- time.sleep(6)
- # timeout -= 1
-
- try:
- n = 0
- while 1:
- (stdin, stdout, stderr) = ssh.exec_command(
- "ps aux | grep \"vxlan_tool.py\" | grep -v grep")
- if len(stdout.readlines()) > 0:
- logger.debug("SSH firewall started")
- break
- else:
- n += 1
- if (n > 7):
- break
- logger.debug("SSH firewall not started")
- time.sleep(3)
- except Exception:
- logger.exception("vxlan_tool not started in SF2")
-
- i = 0
-
- # SSH TO EXECUTE cmd_client
- logger.info("TEST STARTED")
- time.sleep(70)
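-        # Four checks follow: with the initial classifiers, SSH to the
-        # server must be blocked (test 1) while HTTP must work (test 2);
-        # after sfc_change_classi.bash swaps the classification, HTTP
-        # must be blocked (test 3) and SSH must work (test 4).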
- try:
- ssh.connect(floatip_client, username="root",
- password="opnfv", timeout=2)
- command = "nc -w 5 -zv " + instance_ip_2 + " 22 2>&1"
- (stdin, stdout, stderr) = ssh.exec_command(command)
-
-            # TODO: find the correct way to do this logging
- if "timed out" in stdout.readlines()[0]:
- logger.info('\033[92m' + "TEST 1 [PASSED] "
- "==> SSH BLOCKED" + '\033[0m')
- i = i + 1
- json_results.update({"Test 1: SSH Blocked": "Passed"})
- else:
- timestamp = time.strftime("%Y%m%d-%H%M%S")
- error = ('\033[91m' + "TEST 1 [FAILED] "
- "==> SSH NOT BLOCKED" + '\033[0m')
- logger.error(error)
- ovs_logger.dump_ovs_logs(controller_clients,
- compute_clients,
- related_error=error,
- timestamp=timestamp)
- status = "FAIL"
- json_results.update({"Test 1: SSH Blocked": "Failed"})
- failures += 1
- except:
- logger.debug("Waiting for %s..." % floatip_client)
- time.sleep(6)
- # timeout -= 1
-
- # SSH TO EXECUTE cmd_client
- try:
- ssh.connect(floatip_client, username="root",
- password="opnfv", timeout=2)
- command = "nc -w 5 -zv " + instance_ip_2 + " 80 2>&1"
- (stdin, stdout, stderr) = ssh.exec_command(command)
-
- if "succeeded" in stdout.readlines()[0]:
- logger.info('\033[92m' + "TEST 2 [PASSED] "
- "==> HTTP WORKS" + '\033[0m')
- i = i + 1
- json_results.update({"Test 2: HTTP works": "Passed"})
- else:
- timestamp = time.strftime("%Y%m%d-%H%M%S")
- error = ('\033[91m' + "TEST 2 [FAILED] "
- "==> HTTP BLOCKED" + '\033[0m')
- logger.error(error)
- ovs_logger.dump_ovs_logs(controller_clients,
- compute_clients,
- related_error=error,
- timestamp=timestamp)
- status = "FAIL"
- json_results.update({"Test 2: HTTP works": "Failed"})
- failures += 1
- except:
- logger.debug("Waiting for %s..." % floatip_client)
- time.sleep(6)
- # timeout -= 1
-
- # CHANGE OF CLASSIFICATION #
- logger.info("Changing the classification")
- tacker_classi = "%s/testcases/features/sfc/%s" % \
- (FUNCTEST_REPO, TACKER_CHANGECLASSI)
- subprocess.call(tacker_classi, shell=True)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- time.sleep(100)
-
- # SSH TO EXECUTE cmd_client
-
- try:
- ssh.connect(floatip_client, username="root",
- password="opnfv", timeout=2)
- command = "nc -w 5 -zv " + instance_ip_2 + " 80 2>&1"
- (stdin, stdout, stderr) = ssh.exec_command(command)
-
- if "timed out" in stdout.readlines()[0]:
- logger.info('\033[92m' + "TEST 3 [PASSED] "
- "==> HTTP BLOCKED" + '\033[0m')
- i = i + 1
- json_results.update({"Test 3: HTTP Blocked": "Passed"})
- else:
- timestamp = time.strftime("%Y%m%d-%H%M%S")
- error = ('\033[91m' + "TEST 3 [FAILED] "
- "==> HTTP NOT BLOCKED" + '\033[0m')
- logger.error(error)
- ovs_logger.dump_ovs_logs(controller_clients,
- compute_clients,
- related_error=error,
- timestamp=timestamp)
- status = "FAIL"
- json_results.update({"Test 3: HTTP Blocked": "Failed"})
- failures += 1
- except:
- logger.debug("Waiting for %s..." % floatip_client)
- time.sleep(6)
- # timeout -= 1
-
- # SSH TO EXECUTE cmd_client
- try:
- ssh.connect(floatip_client, username="root",
- password="opnfv", timeout=2)
- command = "nc -w 5 -zv " + instance_ip_2 + " 22 2>&1"
- (stdin, stdout, stderr) = ssh.exec_command(command)
-
- if "succeeded" in stdout.readlines()[0]:
- logger.info('\033[92m' + "TEST 4 [PASSED] "
- "==> SSH WORKS" + '\033[0m')
- i = i + 1
- json_results.update({"Test 4: SSH works": "Passed"})
- else:
- timestamp = time.strftime("%Y%m%d-%H%M%S")
- error = ('\033[91m' + "TEST 4 [FAILED] "
- "==> SSH BLOCKED" + '\033[0m')
- logger.error(error)
- ovs_logger.dump_ovs_logs(controller_clients,
- compute_clients,
- related_error=error,
- timestamp=timestamp)
- status = "FAIL"
- json_results.update({"Test 4: SSH works": "Failed"})
- failures += 1
- except:
- logger.debug("Waiting for %s..." % floatip_client)
- time.sleep(6)
- # timeout -= 1
-
- ovs_logger.create_artifact_archive()
-
- iterator += 1
- if i == 4:
- for x in range(0, 5):
- logger.info('\033[92m' + "SFC TEST WORKED"
- " :) \n" + '\033[0m')
- break
- else:
- logger.info("Iterating again!")
- delete = "bash %s/testcases/features/sfc/delete.sh" % \
- (FUNCTEST_REPO)
- try:
- subprocess.call(delete, shell=True, stderr=subprocess.PIPE)
- time.sleep(10)
-            except Exception as e:
-                logger.error("Problem when executing delete.sh")
- logger.error("Problem %s" % e)
-
- if args.report:
- stop_time = time.time()
- json_results.update({"tests": "4", "failures": int(failures)})
- logger.debug("Promise Results json: " + str(json_results))
- ft_utils.push_results_to_db("sfc",
- "functest-odl-sfc",
- start_time,
- stop_time,
- status,
- json_results)
- if status == "PASS":
- sys.exit(0)
- else:
- sys.exit(1)
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/features/sfc/sfc_tacker.bash b/testcases/features/sfc/sfc_tacker.bash
deleted file mode 100755
index 690d5f52e..000000000
--- a/testcases/features/sfc/sfc_tacker.bash
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-BASEDIR=`dirname $0`
-
-#import VNF descriptor
-tacker vnfd-create --vnfd-file ${BASEDIR}/test-vnfd1.yaml
-tacker vnfd-create --vnfd-file ${BASEDIR}/test-vnfd2.yaml
-
-#create instances of the imported VNF
-tacker vnf-create --name testVNF1 --vnfd-name test-vnfd1
-tacker vnf-create --name testVNF2 --vnfd-name test-vnfd2
-
-key=true
-while $key;do
- sleep 3
- active=`tacker vnf-list | grep -E 'PENDING|ERROR'`
- echo -e "checking if SFs are up: $active"
- if [ -z "$active" ]; then
- key=false
- fi
-done
-
-#create service chain
-tacker sfc-create --name red --chain testVNF1
-tacker sfc-create --name blue --chain testVNF2
-
-#create classifier
-tacker sfc-classifier-create --name red_http --chain red --match source_port=0,dest_port=80,protocol=6
-tacker sfc-classifier-create --name red_ssh --chain red --match source_port=0,dest_port=22,protocol=6
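-#protocol=6 is TCP; both classifiers steer HTTP (80) and SSH (22) traffic into the 'red' chain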
-
-tacker sfc-list
-tacker sfc-classifier-list
diff --git a/testcases/features/sfc/tacker_client_install.sh b/testcases/features/sfc/tacker_client_install.sh
deleted file mode 100755
index adb9a44be..000000000
--- a/testcases/features/sfc/tacker_client_install.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-MYDIR=$(dirname $(readlink -f "$0"))
-CLIENT=$(echo python-python-tackerclient_*_all.deb)
-CLIREPO="tacker-client"
-
-# Function checks whether a python egg is available, if not, installs
-function chkPPkg() {
- PKG="$1"
- IPPACK=$(python - <<'____EOF'
-import pip
-from os.path import join
-for package in pip.get_installed_distributions():
- print(package.location)
- print(join(package.location, *package._get_metadata("top_level.txt")))
-____EOF
-)
- echo "$IPPACK" | grep -q "$PKG"
- if [ $? -ne 0 ];then
- pip install "$PKG"
- fi
-}
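-# e.g. 'chkPPkg stdeb' pip-installs stdeb only when it is not already present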
-
-function envSetup() {
- apt-get install -y python-all debhelper fakeroot
- #pip install --upgrade python-keystoneclient==1.7.4
- chkPPkg stdeb
-}
-
-# Function installs python-tackerclient from github
-function deployTackerClient() {
- cd $MYDIR
- git clone -b 'SFC_refactor' https://github.com/trozet/python-tackerclient.git $CLIREPO
- cd $CLIREPO
- python setup.py --command-packages=stdeb.command bdist_deb
- cd "deb_dist"
- CLIENT=$(echo python-python-tackerclient_*_all.deb)
- cp $CLIENT $MYDIR
- dpkg -i "${MYDIR}/${CLIENT}"
- apt-get -f -y install
- dpkg -i "${MYDIR}/${CLIENT}"
-}
-
-envSetup
-deployTackerClient
diff --git a/testcases/features/sfc/test-vnfd1.yaml b/testcases/features/sfc/test-vnfd1.yaml
deleted file mode 100644
index 5c672e388..000000000
--- a/testcases/features/sfc/test-vnfd1.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-template_name: test-vnfd1
-description: firewall1-example
-
-service_properties:
- Id: firewall1-vnfd
- vendor: tacker
- version: 1
- type:
- - firewall1
-vdus:
- vdu1:
- id: vdu1
- vm_image: sf_nsh_colorado
- instance_type: custom
- service_type: firewall1
-
- network_interfaces:
- management:
- network: example-net
- management: true
-
- placement_policy:
- availability_zone: nova
-
- auto-scaling: noop
- monitoring_policy: noop
- failure_policy: respawn
-
- config:
- param0: key0
- param1: key1
diff --git a/testcases/features/sfc/test-vnfd2.yaml b/testcases/features/sfc/test-vnfd2.yaml
deleted file mode 100644
index 8a570ab92..000000000
--- a/testcases/features/sfc/test-vnfd2.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-template_name: test-vnfd2
-description: firewall2-example
-
-service_properties:
- Id: firewall2-vnfd
- vendor: tacker
- version: 1
- type:
- - firewall2
-vdus:
- vdu1:
- id: vdu1
- vm_image: sf_nsh_colorado
- instance_type: custom
- service_type: firewall2
-
- network_interfaces:
- management:
- network: example-net
- management: true
-
- placement_policy:
- availability_zone: nova
-
- auto-scaling: noop
- monitoring_policy: noop
- failure_policy: respawn
-
- config:
- param0: key0
- param1: key1
diff --git a/testcases/security_scan/config.ini b/testcases/security_scan/config.ini
deleted file mode 100644
index b97de80fa..000000000
--- a/testcases/security_scan/config.ini
+++ /dev/null
@@ -1,29 +0,0 @@
-[undercloud]
-port = 22
-user = stack
-remotekey = /home/stack/.ssh/id_rsa
-localkey = /root/.ssh/overCloudKey
-
-[controller]
-port = 22
-user = heat-admin
-scantype = xccdf
-secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml
-cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml
-profile = stig-rhel7-server-upstream
-report = report.html
-results = results.xml
-reports_dir=/home/opnfv/functest/results/security_scan/
-clean = True
-
-[compute]
-port = 22
-user = heat-admin
-scantype = xccdf
-secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml
-cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml
-profile = stig-rhel7-server-upstream
-report = report.html
-results = results.xml
-reports_dir=/home/opnfv/functest/results/security_scan/
-clean = True
diff --git a/testcases/security_scan/connect.py b/testcases/security_scan/connect.py
deleted file mode 100644
index 18ca96d80..000000000
--- a/testcases/security_scan/connect.py
+++ /dev/null
@@ -1,244 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Red Hat
-# Luke Hinds (lhinds@redhat.com)
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# 0.1: OpenSCAP paramiko connection functions
-
-import os
-import socket
-import paramiko
-
-import functest.utils.functest_logger as ft_logger
-
-# add installer IP from env
-INSTALLER_IP = os.getenv('INSTALLER_IP')
-
-# Set up loggers
-logger = ft_logger.Logger("security_scan").getLogger()
-paramiko.util.log_to_file("/var/log/paramiko.log")
-
-
-class SetUp:
- def __init__(self, *args):
- self.args = args
-
- def keystonepass(self):
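-        """Run a command on the undercloud over SSH and return its
-        stdout; security_scan.py uses this to read the overcloud admin
-        password via hiera."""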
- com = self.args[0]
- client = paramiko.SSHClient()
- privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
- selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- try:
- client.connect(INSTALLER_IP, port=22, username='stack',
- pkey=selectedkey)
- except paramiko.SSHException:
- logger.error("Password is invalid for "
- "undercloud host: {0}".format(INSTALLER_IP))
- except paramiko.AuthenticationException:
- logger.error("Authentication failed for "
- "undercloud host: {0}".format(INSTALLER_IP))
- except socket.error:
- logger.error("Socker Connection failed for "
- "undercloud host: {0}".format(INSTALLER_IP))
- stdin, stdout, stderr = client.exec_command(com)
-        output = stdout.read()
-        client.close()
-        return output
-
- def getockey(self):
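-        """Copy the overcloud SSH private key from the undercloud to
-        the local host over SFTP."""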
- remotekey = self.args[0]
- localkey = self.args[1]
- privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
- selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
- transport = paramiko.Transport((INSTALLER_IP, 22))
- transport.connect(username='stack', pkey=selectedkey)
- try:
- sftp = paramiko.SFTPClient.from_transport(transport)
- except paramiko.SSHException:
- logger.error("Authentication failed for "
- "host: {0}".format(INSTALLER_IP))
- except paramiko.AuthenticationException:
- logger.error("Authentication failed for "
- "host: {0}".format(INSTALLER_IP))
- except socket.error:
- logger.error("Socker Connection failed for "
- "undercloud host: {0}".format(INSTALLER_IP))
- sftp.get(remotekey, localkey)
- sftp.close()
- transport.close()
-
-
-class ConnectionManager:
- def __init__(self, host, port, user, localkey, *args):
- self.host = host
- self.port = port
- self.user = user
- self.localkey = localkey
- self.args = args
-
- def remotescript(self):
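-        """Upload a local script to the target node through an SSH
-        tunnel via the undercloud, run it, delete it, and return the
-        captured stdout."""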
- localpath = self.args[0]
- remotepath = self.args[1]
- com = self.args[2]
-
- client = paramiko.SSHClient()
- privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
- selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- # Connection to undercloud
- try:
- client.connect(INSTALLER_IP, port=22, username='stack',
- pkey=selectedkey)
- except paramiko.SSHException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except paramiko.AuthenticationException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except socket.error:
- logger.error("Socker Connection failed for "
- "undercloud host: {0}".format(self.host))
-
- transport = client.get_transport()
- local_addr = ('127.0.0.1', 0)
- channel = transport.open_channel("direct-tcpip",
- (self.host, int(self.port)),
- (local_addr))
- remote_client = paramiko.SSHClient()
- remote_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- # Tunnel to overcloud
- try:
- remote_client.connect('127.0.0.1', port=22, username=self.user,
- key_filename=self.localkey, sock=channel)
- sftp = remote_client.open_sftp()
- sftp.put(localpath, remotepath)
- except paramiko.SSHException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except paramiko.AuthenticationException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except socket.error:
- logger.error("Socker Connection failed for "
- "undercloud host: {0}".format(self.host))
-
- output = ""
- stdin, stdout, stderr = remote_client.exec_command(com)
- stdout = stdout.readlines()
- # remove script
- sftp.remove(remotepath)
- remote_client.close()
- client.close()
- # Pipe back stout
- for line in stdout:
- output = output + line
- if output != "":
- return output
-
- def remotecmd(self):
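-        """Run a single command on the target node through an SSH
-        tunnel via the undercloud and print its output."""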
- com = self.args[0]
-
- client = paramiko.SSHClient()
- privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
- selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- # Connection to undercloud
- try:
- client.connect(INSTALLER_IP, port=22, username='stack',
- pkey=selectedkey)
- except paramiko.SSHException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except paramiko.AuthenticationException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except socket.error:
- logger.error("Socker Connection failed for "
- "undercloud host: {0}".format(self.host))
-
- transport = client.get_transport()
- local_addr = ('127.0.0.1', 0) # 0 denotes choose random port
- channel = transport.open_channel("direct-tcpip",
- (self.host, int(self.port)),
- (local_addr))
- remote_client = paramiko.SSHClient()
- remote_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- # Tunnel to overcloud
- try:
- remote_client.connect('127.0.0.1', port=22, username=self.user,
- key_filename=self.localkey, sock=channel)
- except paramiko.SSHException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except paramiko.AuthenticationException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except socket.error:
- logger.error("Socker Connection failed for "
- "undercloud host: {0}".format(self.host))
-
- chan = remote_client.get_transport().open_session()
- chan.get_pty()
- feed = chan.makefile()
- chan.exec_command(com)
- print feed.read()
-
- remote_client.close()
- client.close()
-
- def download_reports(self):
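-        """Download the OpenSCAP report and results files from the
-        scanned node into the local reports folder."""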
- dl_folder = self.args[0]
- reportfile = self.args[1]
- reportname = self.args[2]
- resultsname = self.args[3]
- client = paramiko.SSHClient()
- privatekeyfile = os.path.expanduser('/root/.ssh/id_rsa')
- selectedkey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        # Connection to undercloud
- try:
- client.connect(INSTALLER_IP, port=22, username='stack',
- pkey=selectedkey)
- except paramiko.SSHException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except paramiko.AuthenticationException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except socket.error:
- logger.error("Socker Connection failed for "
- "undercloud host: {0}".format(self.host))
-
- transport = client.get_transport()
- local_addr = ('127.0.0.1', 0) # 0 denotes choose random port
- channel = transport.open_channel("direct-tcpip",
- (self.host, int(self.port)),
- (local_addr))
- remote_client = paramiko.SSHClient()
- remote_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- # Tunnel to overcloud
- try:
- remote_client.connect('127.0.0.1', port=22, username=self.user,
- key_filename=self.localkey, sock=channel)
- except paramiko.SSHException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except paramiko.AuthenticationException:
- logger.error("Authentication failed for "
- "host: {0}".format(self.host))
- except socket.error:
- logger.error("Socker Connection failed for "
- "undercloud host: {0}".format(self.host))
- # Download the reports
- sftp = remote_client.open_sftp()
- logger.info("Downloading \"{0}\"...".format(reportname))
- sftp.get(reportfile, ('{0}/{1}'.format(dl_folder, reportname)))
- logger.info("Downloading \"{0}\"...".format(resultsname))
- sftp.get(reportfile, ('{0}/{1}'.format(dl_folder, resultsname)))
- sftp.close()
- transport.close()
diff --git a/testcases/security_scan/examples/xccdf-rhel7-server-upstream.ini b/testcases/security_scan/examples/xccdf-rhel7-server-upstream.ini
deleted file mode 100644
index 43b2e82d6..000000000
--- a/testcases/security_scan/examples/xccdf-rhel7-server-upstream.ini
+++ /dev/null
@@ -1,29 +0,0 @@
-[undercloud]
-port = 22
-user = stack
-remotekey = /home/stack/.ssh/id_rsa
-localkey = /root/.ssh/overCloudKey
-
-[controller]
-port = 22
-user = heat-admin
-scantype = xccdf
-secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml
-cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml
-profile = stig-rhel7-server-upstream
-report = report.html
-results = results.xml
-reports_dir=/home/opnfv/functest/results/security_scan/
-clean = True
-
-[compute]
-port = 22
-user = heat-admin
-scantype = xccdf
-secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml
-cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml
-profile = stig-rhel7-server-upstream
-report = report.html
-results = results.xml
-reports_dir=/home/opnfv/functest/results/security_scan/
-clean = True
diff --git a/testcases/security_scan/examples/xccdf-standard.ini b/testcases/security_scan/examples/xccdf-standard.ini
deleted file mode 100644
index bfbcf82d3..000000000
--- a/testcases/security_scan/examples/xccdf-standard.ini
+++ /dev/null
@@ -1,29 +0,0 @@
-[undercloud]
-port = 22
-user = stack
-remotekey = /home/stack/.ssh/id_rsa
-localkey = /root/.ssh/overCloudKey
-
-[controller]
-port = 22
-user = heat-admin
-scantype = xccdf
-secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml
-cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml
-profile = standard
-report = report.html
-results = results.xml
-reports_dir=/home/opnfv/functest/results/security_scan/
-clean = True
-
-[compute]
-port = 22
-user = heat-admin
-scantype = xccdf
-secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml
-cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml
-profile = standard
-report = report.html
-results = results.xml
-reports_dir=/home/opnfv/functest/results/security_scan/
-clean = True
diff --git a/testcases/security_scan/scripts/createfiles.py b/testcases/security_scan/scripts/createfiles.py
deleted file mode 100644
index b828901a5..000000000
--- a/testcases/security_scan/scripts/createfiles.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Red Hat
-# Luke Hinds (lhinds@redhat.com)
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# 0.1: This script creates the needed local files in a tmp directory. Should
-# '--clean' be passed, all files will be removed post scan.
-
-
-import os
-import tempfile
-
-files = ['results.xml', 'report.html', 'syschar.xml']
-
-
-directory_name = tempfile.mkdtemp()
-
-for i in files:
- os.system("touch %s/%s" % (directory_name, i))
-
-print directory_name
diff --git a/testcases/security_scan/scripts/internet_check.py b/testcases/security_scan/scripts/internet_check.py
deleted file mode 100644
index 1bed50a70..000000000
--- a/testcases/security_scan/scripts/internet_check.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Red Hat
-# Luke Hinds (lhinds@redhat.com)
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Performs a simple connection check (2 second socket timeout)
-
-import socket
-
-TEST_HOST = "google.com"
-
-
-def is_connected():
- try:
- host = socket.gethostbyname(TEST_HOST)
- socket.create_connection((host, 80), 2)
- return True
- except:
- return False
-print is_connected()
diff --git a/testcases/security_scan/security_scan.py b/testcases/security_scan/security_scan.py
deleted file mode 100755
index 98e6b7a8c..000000000
--- a/testcases/security_scan/security_scan.py
+++ /dev/null
@@ -1,215 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Red Hat
-# Luke Hinds (lhinds@redhat.com)
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# 0.1: This script installs OpenSCAP on the remote host, and scans the
-# nominated node. Post scan a report is downloaded and if '--clean' is passed
-# all traces of the scan are removed from the remote system.
-
-
-import datetime
-import os
-import sys
-from ConfigParser import SafeConfigParser
-
-import argparse
-from keystoneclient import session
-from keystoneclient.auth.identity import v2
-from novaclient import client
-
-import connect
-import functest.utils.functest_utils as ft_utils
-
-__version__ = 0.1
-__author__ = 'Luke Hinds (lhinds@redhat.com)'
-__url__ = 'https://wiki.opnfv.org/display/functest/Functest+Security'
-
-# Global vars
-INSTALLER_IP = os.getenv('INSTALLER_IP')
-oscapbin = 'sudo /bin/oscap'
-functest_dir = '%s/testcases/security_scan/' % ft_utils.FUNCTEST_REPO
-
-# Apex-specific var needed to query the undercloud
-if os.getenv('OS_AUTH_URL') is None:
- connect.logger.error(" Enviroment variable OS_AUTH_URL is not set")
- sys.exit(0)
-else:
- OS_AUTH_URL = os.getenv('OS_AUTH_URL')
-
-# args
-parser = argparse.ArgumentParser(description='OPNFV OpenSCAP Scanner')
-parser.add_argument('--config', action='store', dest='cfgfile',
- help='Config file', required=True)
-args = parser.parse_args()
-
-# Config Parser
-cfgparse = SafeConfigParser()
-cfgparse.read(args.cfgfile)
-
-# Grab Undercloud key
-remotekey = cfgparse.get('undercloud', 'remotekey')
-localkey = cfgparse.get('undercloud', 'localkey')
-setup = connect.SetUp(remotekey, localkey)
-setup.getockey()
-
-
-# Configure Nova Credentials
-com = 'sudo /usr/bin/hiera admin_password'
-setup = connect.SetUp(com)
-keypass = setup.keystonepass()
-auth = v2.Password(auth_url=OS_AUTH_URL,
- username='admin',
- password=str(keypass).rstrip(),
- tenant_name='admin')
-sess = session.Session(auth=auth)
-nova = client.Client(2, session=sess)
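-# Auth flow: read the overcloud admin password from the undercloud via
-# hiera, open a Keystone v2 session with it, and use Nova on that
-# session to enumerate the overcloud nodes to scan.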
-
-
-def run_tests(host, nodetype):
- user = cfgparse.get(nodetype, 'user')
- port = cfgparse.get(nodetype, 'port')
- connect.logger.info("Host: {0} Selected Profile: {1}".format(host,
- nodetype))
- connect.logger.info("Checking internet for package installation...")
- if internet_check(host, nodetype):
- connect.logger.info("Internet Connection OK.")
- connect.logger.info("Creating temp file structure..")
- createfiles(host, port, user, localkey)
- connect.logger.debug("Installing OpenSCAP...")
- install_pkg(host, port, user, localkey)
- connect.logger.debug("Running scan...")
- run_scanner(host, port, user, localkey, nodetype)
- clean = cfgparse.get(nodetype, 'clean')
- connect.logger.info("Post installation tasks....")
- post_tasks(host, port, user, localkey, nodetype)
- if clean:
- connect.logger.info("Cleaning down environment....")
- connect.logger.debug("Removing OpenSCAP....")
- removepkg(host, port, user, localkey, nodetype)
- connect.logger.info("Deleting tmp file and reports (remote)...")
- cleandir(host, port, user, localkey, nodetype)
- else:
- connect.logger.error("Internet timeout. Moving on to next node..")
- pass
-
-
-def nova_iterate():
- # Find compute nodes, active with network on ctlplane
- for server in nova.servers.list():
- if server.status == 'ACTIVE' and 'compute' in server.name:
- networks = server.networks
- nodetype = 'compute'
- for host in networks['ctlplane']:
- run_tests(host, nodetype)
- # Find controller nodes, active with network on ctlplane
- elif server.status == 'ACTIVE' and 'controller' in server.name:
- networks = server.networks
- nodetype = 'controller'
- for host in networks['ctlplane']:
- run_tests(host, nodetype)
-
-
-def internet_check(host, nodetype):
- import connect
- user = cfgparse.get(nodetype, 'user')
- port = cfgparse.get(nodetype, 'port')
- localpath = functest_dir + 'scripts/internet_check.py'
- remotepath = '/tmp/internet_check.py'
- com = 'python /tmp/internet_check.py'
- testconnect = connect.ConnectionManager(host, port, user, localkey,
- localpath, remotepath, com)
- connectionresult = testconnect.remotescript()
- if connectionresult.rstrip() == 'True':
- return True
- else:
- return False
-
-
-def createfiles(host, port, user, localkey):
- import connect
- global tmpdir
- localpath = functest_dir + 'scripts/createfiles.py'
- remotepath = '/tmp/createfiles.py'
- com = 'python /tmp/createfiles.py'
- connect = connect.ConnectionManager(host, port, user, localkey,
- localpath, remotepath, com)
- tmpdir = connect.remotescript()
-
-
-def install_pkg(host, port, user, localkey):
- import connect
- com = 'sudo yum -y install openscap-scanner scap-security-guide'
- connect = connect.ConnectionManager(host, port, user, localkey, com)
- connect.remotecmd()
-
-
-def run_scanner(host, port, user, localkey, nodetype):
- import connect
- scantype = cfgparse.get(nodetype, 'scantype')
- profile = cfgparse.get(nodetype, 'profile')
- results = cfgparse.get(nodetype, 'results')
- report = cfgparse.get(nodetype, 'report')
- secpolicy = cfgparse.get(nodetype, 'secpolicy')
-    # Here is where we construct the actual scan command
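-    # Illustrative shape of the xccdf command (the actual values come
-    # from the config file and the remote tmp dir):
-    #   sudo /bin/oscap xccdf eval --profile stig-rhel7-server-upstream \
-    #     --results <tmpdir>/results.xml --report <tmpdir>/report.html \
-    #     --cpe ssg-rhel7-cpe-dictionary.xml ssg-centos7-xccdf.xml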
- if scantype == 'xccdf':
- cpe = cfgparse.get(nodetype, 'cpe')
- com = '{0} xccdf eval --profile {1} --results {2}/{3}' \
- ' --report {2}/{4} --cpe {5} {6}'.format(oscapbin,
- profile,
- tmpdir.rstrip(),
- results,
- report,
- cpe,
- secpolicy)
- connect = connect.ConnectionManager(host, port, user, localkey, com)
- connect.remotecmd()
- elif scantype == 'oval':
-        com = ('{0} oval eval --results {1}/{2} '
-               '--report {1}/{3} {4}'.format(oscapbin, tmpdir.rstrip(),
-                                             results, report, secpolicy))
- connect = connect.ConnectionManager(host, port, user, localkey, com)
- connect.remotecmd()
- else:
- com = '{0} oval-collect '.format(oscapbin)
- connect = connect.ConnectionManager(host, port, user, localkey, com)
- connect.remotecmd()
-
-
-def post_tasks(host, port, user, localkey, nodetype):
- import connect
- # Create the download folder for functest dashboard and download reports
- reports_dir = cfgparse.get(nodetype, 'reports_dir')
- dl_folder = os.path.join(reports_dir, host + "_" +
- datetime.datetime.
- now().strftime('%Y-%m-%d_%H-%M-%S'))
- os.makedirs(dl_folder, 0755)
- report = cfgparse.get(nodetype, 'report')
- results = cfgparse.get(nodetype, 'results')
- reportfile = '{0}/{1}'.format(tmpdir.rstrip(), report)
- connect = connect.ConnectionManager(host, port, user, localkey, dl_folder,
- reportfile, report, results)
- connect.download_reports()
-
-
-def removepkg(host, port, user, localkey, nodetype):
- import connect
- com = 'sudo yum -y remove openscap-scanner scap-security-guide'
- connect = connect.ConnectionManager(host, port, user, localkey, com)
- connect.remotecmd()
-
-
-def cleandir(host, port, user, localkey, nodetype):
- import connect
- com = 'sudo rm -r {0}'.format(tmpdir.rstrip())
- connect = connect.ConnectionManager(host, port, user, localkey, com)
- connect.remotecmd()
-
-
-if __name__ == '__main__':
- nova_iterate()
diff --git a/testcases/vnf/vIMS/clearwater.py b/testcases/vnf/vIMS/clearwater.py
deleted file mode 100644
index 7236f4fba..000000000
--- a/testcases/vnf/vIMS/clearwater.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/python
-# coding: utf8
-#######################################################################
-#
-# Copyright (c) 2015 Orange
-# valentin.boucher@orange.com
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-########################################################################
-
-
-class clearwater:
-
- def __init__(self, inputs={}, orchestrator=None, logger=None):
- self.config = inputs
- self.orchestrator = orchestrator
- self.logger = logger
- self.deploy = False
-
- def set_orchestrator(self, orchestrator):
- self.orchestrator = orchestrator
-
- def set_flavor_id(self, flavor_id):
- self.config['flavor_id'] = flavor_id
-
- def set_image_id(self, image_id):
- self.config['image_id'] = image_id
-
- def set_agent_user(self, agent_user):
- self.config['agent_user'] = agent_user
-
- def set_external_network_name(self, external_network_name):
- self.config['external_network_name'] = external_network_name
-
- def set_public_domain(self, public_domain):
- self.config['public_domain'] = public_domain
-
- def deploy_vnf(self, blueprint, bp_name='clearwater',
- dep_name='clearwater-opnfv'):
- if self.orchestrator:
- self.dep_name = dep_name
- error = self.orchestrator.download_upload_and_deploy_blueprint(
- blueprint, self.config, bp_name, dep_name)
- if error:
- return error
-
- self.deploy = True
-
- else:
- if self.logger:
- self.logger.error("Cloudify manager is down or not provide...")
-
- def undeploy_vnf(self):
- if self.orchestrator:
- if self.deploy:
- self.deploy = False
- self.orchestrator.undeploy_deployment(self.dep_name)
- else:
- if self.logger:
- self.logger.error("Clearwater isn't already deploy...")
- else:
- if self.logger:
- self.logger.error("Cloudify manager is down or not provide...")
diff --git a/testcases/vnf/vIMS/create_venv.sh b/testcases/vnf/vIMS/create_venv.sh
deleted file mode 100755
index 575fd177c..000000000
--- a/testcases/vnf/vIMS/create_venv.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash -e
-
-# Script checks that venv exists. If it doesn't it will be created
-# It requires python2.7 and virtualenv packages installed
-#
-# Copyright (c) 2015 Orange
-# valentin.boucher@orange.com
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-BASEDIR=`dirname $0`
-VENV_PATH=$1
-VENV_NAME="venv_cloudify"
-function venv_install() {
- if command -v virtualenv-2.7; then
- virtualenv-2.7 $1
- elif command -v virtualenv2; then
- virtualenv2 $1
- elif command -v virtualenv; then
- virtualenv $1
- else
- echo Cannot find virtualenv command.
- return 1
- fi
-}
-
-# exit when something goes wrong during venv install
-set -e
-if [ ! -d "$VENV_PATH/$VENV_NAME" ]; then
- venv_install $VENV_PATH/$VENV_NAME
- echo "Virtualenv" + $VENV_NAME + "created."
-fi
-
-if [ ! -f "$VENV_PATH/$VENV_NAME/updated" -o $BASEDIR/requirements.pip -nt $VENV_PATH/$VENV_NAME/updated ]; then
- source $VENV_PATH/$VENV_NAME/bin/activate
- pip install -r $BASEDIR/requirements.pip
- touch $VENV_PATH/$VENV_NAME/updated
- echo "Requirements installed."
- deactivate
-fi
-set +e
diff --git a/testcases/vnf/vIMS/orchestrator.py b/testcases/vnf/vIMS/orchestrator.py
deleted file mode 100644
index 61157a4fb..000000000
--- a/testcases/vnf/vIMS/orchestrator.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/python
-# coding: utf8
-#######################################################################
-#
-# Copyright (c) 2015 Orange
-# valentin.boucher@orange.com
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-########################################################################
-
-import os
-import shutil
-import subprocess32 as subprocess
-import yaml
-
-from git import Repo
-
-import functest.utils.functest_logger as ft_logger
-
-
-class orchestrator:
-
- def __init__(self, testcase_dir, inputs={}):
- self.testcase_dir = testcase_dir
- self.blueprint_dir = testcase_dir + 'cloudify-manager-blueprint/'
- self.input_file = 'inputs.yaml'
- self.manager_blueprint = False
- self.config = inputs
- self.logger = ft_logger.Logger("Orchestrator").getLogger()
- self.manager_up = False
-
- def set_credentials(self, username, password, tenant_name, auth_url):
- self.config['keystone_username'] = username
- self.config['keystone_password'] = password
- self.config['keystone_url'] = auth_url
- self.config['keystone_tenant_name'] = tenant_name
-
- def set_flavor_id(self, flavor_id):
- self.config['flavor_id'] = flavor_id
-
- def set_image_id(self, image_id):
- self.config['image_id'] = image_id
-
- def set_external_network_name(self, external_network_name):
- self.config['external_network_name'] = external_network_name
-
- def set_ssh_user(self, ssh_user):
- self.config['ssh_user'] = ssh_user
-
- def set_nova_url(self, nova_url):
- self.config['nova_url'] = nova_url
-
- def set_neutron_url(self, neutron_url):
- self.config['neutron_url'] = neutron_url
-
- def set_nameservers(self, nameservers):
- if 0 < len(nameservers):
- self.config['dns_subnet_1'] = nameservers[0]
-
- def download_manager_blueprint(self, manager_blueprint_url,
- manager_blueprint_branch):
- if self.manager_blueprint:
- self.logger.info(
- "cloudify manager server blueprint is "
- "already downloaded !")
- else:
- self.logger.info(
- "Downloading the cloudify manager server blueprint")
- download_result = self._download_blueprints(
- manager_blueprint_url,
- manager_blueprint_branch,
- self.blueprint_dir)
-
- if not download_result:
- self.logger.error("Failed to download manager blueprint")
- exit(-1)
- else:
- self.manager_blueprint = True
-
-    def is_manager_up(self):
-        # the boolean manager_up attribute set in __init__ would shadow
-        # a method of the same name, so the accessor is named differently
-        return self.manager_up
-
- def deploy_manager(self):
- if self.manager_blueprint:
- self.logger.info("Writing the inputs file")
- with open(self.blueprint_dir + "inputs.yaml", "w") as f:
- f.write(yaml.dump(self.config, default_style='"'))
- f.close()
-
- # Ensure no ssh key file already exists
- key_files = ["/.ssh/cloudify-manager-kp.pem",
- "/.ssh/cloudify-agent-kp.pem"]
- home = os.path.expanduser("~")
-
- for key_file in key_files:
- if os.path.isfile(home + key_file):
- os.remove(home + key_file)
-
- self.logger.info("Launching the cloudify-manager deployment")
- script = "set -e; "
- script += ("source " + self.testcase_dir +
- "venv_cloudify/bin/activate; ")
- script += "cd " + self.testcase_dir + "; "
- script += "cfy init -r; "
- script += "cd cloudify-manager-blueprint; "
- script += ("cfy local create-requirements -o requirements.txt " +
- "-p openstack-manager-blueprint.yaml; ")
- script += "pip install -r requirements.txt; "
- script += ("cfy bootstrap --install-plugins " +
- "-p openstack-manager-blueprint.yaml -i inputs.yaml; ")
- cmd = "/bin/bash -c '" + script + "'"
- error = execute_command(cmd, self.logger)
- if error:
- return error
-
- self.logger.info("Cloudify-manager server is UP !")
-
- self.manager_up = True
-
- def undeploy_manager(self):
- self.logger.info("Launching the cloudify-manager undeployment")
-
- self.manager_up = False
-
- script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; "
- script += "cd " + self.testcase_dir + "; "
- script += "cfy teardown -f --ignore-deployments; "
- cmd = "/bin/bash -c '" + script + "'"
- execute_command(cmd, self.logger)
-
- self.logger.info(
- "Cloudify-manager server has been successfully removed!")
-
- def download_upload_and_deploy_blueprint(self, blueprint, config,
- bp_name, dep_name):
- self.logger.info("Downloading the {0} blueprint".format(
- blueprint['file_name']))
- destination_folder = self.testcase_dir + \
- blueprint['destination_folder']
- download_result = self._download_blueprints(blueprint['url'],
- blueprint['branch'],
- destination_folder)
-
- if not download_result:
- self.logger.error(
- "Failed to download blueprint {0}".
- format(blueprint['file_name']))
- exit(-1)
-
- self.logger.info("Writing the inputs file")
-
- with open(self.testcase_dir + blueprint['destination_folder'] +
- "/inputs.yaml", "w") as f:
- f.write(yaml.dump(config, default_style='"'))
- f.close()
-
- self.logger.info("Launching the {0} deployment".format(bp_name))
- script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; "
- script += ("cd " + self.testcase_dir +
- blueprint['destination_folder'] + "; ")
- script += ("cfy blueprints upload -b " +
- bp_name + " -p openstack-blueprint.yaml; ")
- script += ("cfy deployments create -b " + bp_name +
- " -d " + dep_name + " --inputs inputs.yaml; ")
- script += ("cfy executions start -w install -d " +
- dep_name + " --timeout 1800; ")
-
- cmd = "/bin/bash -c '" + script + "'"
- error = execute_command(cmd, self.logger, 2000)
- if error:
- return error
- self.logger.info("The deployment of {0} is ended".format(dep_name))
-
- def undeploy_deployment(self, dep_name):
- self.logger.info("Launching the {0} undeployment".format(dep_name))
- script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; "
- script += "cd " + self.testcase_dir + "; "
- script += ("cfy executions start -w uninstall -d " + dep_name +
- " --timeout 1800 ; ")
- script += "cfy deployments delete -d " + dep_name + "; "
-
- cmd = "/bin/bash -c '" + script + "'"
- try:
- execute_command(cmd, self.logger)
- except:
- self.logger.error("Clearwater undeployment failed")
-
- def _download_blueprints(self, blueprint_url, branch, dest_path):
- if os.path.exists(dest_path):
- shutil.rmtree(dest_path)
- try:
- Repo.clone_from(blueprint_url, dest_path, branch=branch)
- return True
- except:
- return False
-
-
-def execute_command(cmd, logger, timeout=1800):
- """
- Execute Linux command
- """
- if logger:
- logger.debug('Executing command : {}'.format(cmd))
- timeout_exception = False
- output_file = "output.txt"
- f = open(output_file, 'w+')
- try:
- p = subprocess.call(cmd, shell=True, stdout=f,
- stderr=subprocess.STDOUT, timeout=timeout)
-    except subprocess.TimeoutExpired:
-        timeout_exception = True
-        p = -1  # subprocess.call never returned; mark the run as failed
-        if logger:
-            logger.error("TIMEOUT when executing command %s" % cmd)
-
- f.close()
- f = open(output_file, 'r')
- result = f.read()
- if result != "" and logger:
- logger.debug(result)
- if p == 0:
- return False
- else:
- if logger and not timeout_exception:
- logger.error("Error when executing command %s" % cmd)
- f = open(output_file, 'r')
- lines = f.readlines()
- result = lines[len(lines) - 3]
- result += lines[len(lines) - 2]
- result += lines[len(lines) - 1]
- return result
diff --git a/testcases/vnf/vIMS/requirements.pip b/testcases/vnf/vIMS/requirements.pip
deleted file mode 100644
index ab26f6e02..000000000
--- a/testcases/vnf/vIMS/requirements.pip
+++ /dev/null
@@ -1 +0,0 @@
-cloudify==3.3.1
\ No newline at end of file
diff --git a/testcases/vnf/vIMS/vIMS.py b/testcases/vnf/vIMS/vIMS.py
deleted file mode 100755
index 50aa715f4..000000000
--- a/testcases/vnf/vIMS/vIMS.py
+++ /dev/null
@@ -1,536 +0,0 @@
-#!/usr/bin/python
-# coding: utf8
-#######################################################################
-#
-# Copyright (c) 2015 Orange
-# valentin.boucher@orange.com
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-########################################################################
-
-import datetime
-import json
-import os
-import pprint
-import subprocess
-import time
-
-import argparse
-import keystoneclient.v2_0.client as ksclient
-import novaclient.client as nvclient
-import requests
-from neutronclient.v2_0 import client as ntclient
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-from clearwater import clearwater
-from orchestrator import orchestrator
-
-pp = pprint.PrettyPrinter(indent=4)
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-parser.add_argument("-n", "--noclean",
- help="Don't clean the created resources for this test.",
- action="store_true")
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("vIMS").getLogger()
-
-
-# Cloudify parameters
-VIMS_DIR = ft_utils.FUNCTEST_REPO + '/' + \
- ft_utils.get_functest_config('general.directories.dir_vIMS')
-
-VIMS_DATA_DIR = \
- ft_utils.get_functest_config('general.directories.dir_vIMS_data') + \
- '/'
-VIMS_TEST_DIR = \
- ft_utils.get_functest_config('general.directories.dir_repo_vims_test') + \
- '/'
-DB_URL = \
- ft_utils.get_functest_config('results.test_db_url')
-
-TENANT_NAME = \
- ft_utils.get_functest_config('vIMS.general.tenant_name')
-TENANT_DESCRIPTION = \
- ft_utils.get_functest_config('vIMS.general.tenant_description')
-IMAGES = \
- ft_utils.get_functest_config('vIMS.general.images')
-
-CFY_MANAGER_BLUEPRINT = \
- ft_utils.get_functest_config('vIMS.cloudify.blueprint')
-CFY_MANAGER_REQUIERMENTS = \
- ft_utils.get_functest_config('vIMS.cloudify.requierments')
-CFY_INPUTS = ft_utils.get_functest_config('vIMS.cloudify.inputs')
-
-CW_BLUEPRINT = \
- ft_utils.get_functest_config('vIMS.clearwater.blueprint')
-CW_DEPLOYMENT_NAME = \
- ft_utils.get_functest_config('vIMS.clearwater.deployment-name')
-CW_INPUTS = \
- ft_utils.get_functest_config('vIMS.clearwater.inputs')
-CW_REQUIERMENTS = \
- ft_utils.get_functest_config('vIMS.clearwater.requierments')
-
-CFY_DEPLOYMENT_DURATION = 0
-CW_DEPLOYMENT_DURATION = 0
-
-TESTCASE_START_TIME = time.time()
-RESULTS = {'orchestrator': {'duration': 0, 'result': ''},
- 'vIMS': {'duration': 0, 'result': ''},
- 'sig_test': {'duration': 0, 'result': ''}}
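-# The three phases pushed to the results DB: Cloudify manager
-# bootstrap ('orchestrator'), Clearwater blueprint deployment ('vIMS')
-# and the Clearwater live test run ('sig_test').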
-
-
-def download_and_add_image_on_glance(glance, image_name, image_url):
- dest_path = VIMS_DATA_DIR + "tmp/"
- if not os.path.exists(dest_path):
- os.makedirs(dest_path)
- file_name = image_url.rsplit('/')[-1]
- if not ft_utils.download_url(image_url, dest_path):
- logger.error("Failed to download image %s" % file_name)
- return False
-
- image = os_utils.create_glance_image(
- glance, image_name, dest_path + file_name)
- if not image:
- logger.error("Failed to upload image on glance")
- return False
-
- return image
-
-
-def step_failure(step_name, error_msg):
- logger.error(error_msg)
- set_result(step_name, 0, error_msg)
- status = "FAIL"
-    # in case of failure the start and stop times are not correct
- stop_time = time.time()
- if step_name == "sig_test":
- status = "PASS"
- ft_utils.push_results_to_db("functest",
- "vims",
- TESTCASE_START_TIME,
- stop_time,
- status,
- RESULTS)
- exit(-1)
-
-
-def set_result(step_name, duration=0, result=""):
- RESULTS[step_name] = {'duration': duration, 'result': result}
-
-
-def test_clearwater():
- script = "source " + VIMS_DATA_DIR + "venv_cloudify/bin/activate; "
- script += "cd " + VIMS_DATA_DIR + "; "
- script += "cfy status | grep -Eo \"([0-9]{1,3}\.){3}[0-9]{1,3}\""
- cmd = "/bin/bash -c '" + script + "'"
-
- try:
- logger.debug("Trying to get clearwater manager IP ... ")
- mgr_ip = os.popen(cmd).read()
- mgr_ip = mgr_ip.splitlines()[0]
- except:
- step_failure("sig_test", "Unable to retrieve the IP of the "
- "cloudify manager server !")
-
- api_url = "http://" + mgr_ip + "/api/v2"
- dep_outputs = requests.get(api_url + "/deployments/" +
- CW_DEPLOYMENT_NAME + "/outputs")
- dns_ip = dep_outputs.json()['outputs']['dns_ip']
- ellis_ip = dep_outputs.json()['outputs']['ellis_ip']
-
- ellis_url = "http://" + ellis_ip + "/"
- url = ellis_url + "accounts"
-
- params = {"password": "functest",
- "full_name": "opnfv functest user",
- "email": "functest@opnfv.fr",
- "signup_code": "secret"}
-
- rq = requests.post(url, data=params)
- i = 20
- while rq.status_code != 201 and i > 0:
- rq = requests.post(url, data=params)
- i = i - 1
- time.sleep(10)
-
- if rq.status_code == 201:
- url = ellis_url + "session"
- rq = requests.post(url, data=params)
- cookies = rq.cookies
-
- url = ellis_url + "accounts/" + params['email'] + "/numbers"
- if cookies != "":
- rq = requests.post(url, cookies=cookies)
- i = 24
- while rq.status_code != 200 and i > 0:
- rq = requests.post(url, cookies=cookies)
- i = i - 1
- time.sleep(25)
-
- if rq.status_code != 200:
- step_failure("sig_test", "Unable to create a number: %s"
- % rq.json()['reason'])
-
- start_time_ts = time.time()
- end_time_ts = start_time_ts
- logger.info("vIMS functional test Start Time:'%s'" % (
- datetime.datetime.fromtimestamp(start_time_ts).strftime(
- '%Y-%m-%d %H:%M:%S')))
- nameservers = ft_utils.get_resolvconf_ns()
- resolvconf = ""
- for ns in nameservers:
- resolvconf += "\nnameserver " + ns
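-    # dns_ip (the Clearwater DNS VM) is placed first in resolv.conf so
-    # the test suite can resolve the deployment's public_domain; the
-    # original nameservers are kept as fallback.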
-
- if dns_ip != "":
- script = ('echo -e "nameserver ' + dns_ip + resolvconf +
- '" > /etc/resolv.conf; ')
- script += 'source /etc/profile.d/rvm.sh; '
- script += 'cd ' + VIMS_TEST_DIR + '; '
- script += ('rake test[' + CW_INPUTS["public_domain"] +
- '] SIGNUP_CODE="secret"')
-
- cmd = "/bin/bash -c '" + script + "'"
- output_file = "output.txt"
- f = open(output_file, 'w+')
- subprocess.call(cmd, shell=True, stdout=f,
- stderr=subprocess.STDOUT)
- f.close()
- end_time_ts = time.time()
- duration = round(end_time_ts - start_time_ts, 1)
- logger.info("vIMS functional test duration:'%s'" % duration)
- f = open(output_file, 'r')
- result = f.read()
- if result != "" and logger:
- logger.debug(result)
-
- vims_test_result = ""
- try:
- logger.debug("Trying to load test results")
- with open(VIMS_TEST_DIR + "temp.json") as f:
- vims_test_result = json.load(f)
- f.close()
- except:
- logger.error("Unable to retrieve test results")
-
- set_result("sig_test", duration, vims_test_result)
-
- # success criteria for vIMS (for Brahmaputra)
- # - orchestrator deployed
- # - VNF deployed
- # TODO use test criteria defined in config file
- status = "FAIL"
- try:
- if (RESULTS['orchestrator']['duration'] > 0 and
- RESULTS['vIMS']['duration'] > 0):
- status = "PASS"
- except:
- logger.error("Unable to set test status")
-
- ft_utils.push_results_to_db("functest",
- "vims",
- TESTCASE_START_TIME,
- end_time_ts,
- status,
- RESULTS)
-
- try:
- os.remove(VIMS_TEST_DIR + "temp.json")
- except:
- logger.error("Deleting file failed")
-
-
-def main():
-
- # ############### GENERAL INITIALISATION ################
-
- if not os.path.exists(VIMS_DATA_DIR):
- os.makedirs(VIMS_DATA_DIR)
-
- ks_creds = os_utils.get_credentials("keystone")
- nv_creds = os_utils.get_credentials("nova")
- nt_creds = os_utils.get_credentials("neutron")
-
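-    # The test runs in a dedicated tenant; the admin credentials fetched
-    # above are only used to create it (and to clean it up at the end).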
- logger.info("Prepare OpenStack plateform (create tenant and user)")
- keystone = ksclient.Client(**ks_creds)
-
- user_id = os_utils.get_user_id(keystone, ks_creds['username'])
- if user_id == '':
- step_failure("init", "Error : Failed to get id of " +
- ks_creds['username'])
-
- tenant_id = os_utils.create_tenant(
- keystone, TENANT_NAME, TENANT_DESCRIPTION)
- if not tenant_id:
- step_failure("init", "Error : Failed to create " +
- TENANT_NAME + " tenant")
-
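-    # The admin role name varies across deployments ("admin" vs "Admin"):
-    # take the first one that resolves to an id.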
- roles_name = ["admin", "Admin"]
- role_id = ''
- for role_name in roles_name:
- if role_id == '':
- role_id = os_utils.get_role_id(keystone, role_name)
-
- if role_id == '':
- logger.error("Error : Failed to get id for %s role" % role_name)
-
- if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
- logger.error("Error : Failed to add %s on tenant" %
- ks_creds['username'])
-
- user_id = os_utils.create_user(
- keystone, TENANT_NAME, TENANT_NAME, None, tenant_id)
- if not user_id:
- logger.error("Error : Failed to create %s user" % TENANT_NAME)
-
- logger.info("Update OpenStack creds informations")
- ks_creds.update({
- "username": TENANT_NAME,
- "password": TENANT_NAME,
- "tenant_name": TENANT_NAME,
- })
-
- nt_creds.update({
- "tenant_name": TENANT_NAME,
- })
-
- nv_creds.update({
- "project_id": TENANT_NAME,
- })
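-    # Nova and neutron clients built from these creds now operate as the
-    # dedicated test tenant rather than as admin.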
-
- logger.info("Upload some OS images if it doesn't exist")
- glance = os_utils.get_glance_client()
-
- for img in IMAGES.keys():
- image_name = IMAGES[img]['image_name']
- image_url = IMAGES[img]['image_url']
-
- image_id = os_utils.get_image_id(glance, image_name)
-
- if image_id == '':
- logger.info("""%s image doesn't exist on glance repository. Try
- downloading this image and upload on glance !""" % image_name)
- image_id = download_and_add_image_on_glance(
- glance, image_name, image_url)
-
- if image_id == '':
- step_failure(
- "init",
- "Error : Failed to find or upload required OS "
- "image for this deployment")
-
- nova = nvclient.Client("2", **nv_creds)
-
- logger.info("Update security group quota for this tenant")
- neutron = ntclient.Client(**nt_creds)
- if not os_utils.update_sg_quota(neutron, tenant_id, 50, 100):
- step_failure(
- "init",
- "Failed to update security group quota for tenant " + TENANT_NAME)
-
- # ############### CLOUDIFY INITIALISATION ################
- public_auth_url = keystone.service_catalog.url_for(
- service_type='identity', endpoint_type='publicURL')
-
- cfy = orchestrator(VIMS_DATA_DIR, CFY_INPUTS)
-
-    cfy.set_credentials(username=ks_creds['username'],
-                        password=ks_creds['password'],
-                        tenant_name=ks_creds['tenant_name'],
-                        auth_url=public_auth_url)
-
- logger.info("Collect flavor id for cloudify manager server")
- nova = nvclient.Client("2", **nv_creds)
-
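-    # Default to m1.large; a 'ram_min' entry in the manager requirements
-    # overrides it with a RAM-range flavor lookup.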
- flavor_name = "m1.large"
- flavor_id = os_utils.get_flavor_id(nova, flavor_name)
-    if 'ram_min' in CFY_MANAGER_REQUIERMENTS:
-        flavor_id = os_utils.get_flavor_id_by_ram_range(
-            nova, CFY_MANAGER_REQUIERMENTS['ram_min'], 10000)
-
- if flavor_id == '':
-        logger.error(
-            "Failed to find %s flavor. Falling back to the default "
-            "RAM range (4000-8196 MB)." % flavor_name)
-        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)
-
- if flavor_id == '':
- step_failure("orchestrator",
- "Failed to find required flavor for this deployment")
-
- cfy.set_flavor_id(flavor_id)
-
- image_name = "centos_7"
- image_id = os_utils.get_image_id(glance, image_name)
-    if 'os_image' in CFY_MANAGER_REQUIERMENTS:
-        image_id = os_utils.get_image_id(
-            glance, CFY_MANAGER_REQUIERMENTS['os_image'])
-
- if image_id == '':
- step_failure(
- "orchestrator",
- "Error : Failed to find required OS image for cloudify manager")
-
- cfy.set_image_id(image_id)
-
- ext_net = os_utils.get_external_net(neutron)
- if not ext_net:
- step_failure("orchestrator", "Failed to get external network")
-
- cfy.set_external_network_name(ext_net)
-
- ns = ft_utils.get_resolvconf_ns()
- if ns:
- cfy.set_nameservers(ns)
-
- if 'compute' in nova.client.services_url:
- cfy.set_nova_url(nova.client.services_url['compute'])
- if neutron.httpclient.endpoint_url is not None:
- cfy.set_neutron_url(neutron.httpclient.endpoint_url)
-
- logger.info("Prepare virtualenv for cloudify-cli")
- cmd = "chmod +x " + VIMS_DIR + "create_venv.sh"
- ft_utils.execute_command(cmd)
- time.sleep(3)
- cmd = VIMS_DIR + "create_venv.sh " + VIMS_DATA_DIR
- ft_utils.execute_command(cmd)
-
- cfy.download_manager_blueprint(
- CFY_MANAGER_BLUEPRINT['url'], CFY_MANAGER_BLUEPRINT['branch'])
-
- # ############### CLOUDIFY DEPLOYMENT ################
- start_time_ts = time.time()
- end_time_ts = start_time_ts
- logger.info("Cloudify deployment Start Time:'%s'" % (
- datetime.datetime.fromtimestamp(start_time_ts).strftime(
- '%Y-%m-%d %H:%M:%S')))
-
- error = cfy.deploy_manager()
- if error:
- step_failure("orchestrator", error)
-
- end_time_ts = time.time()
- duration = round(end_time_ts - start_time_ts, 1)
- logger.info("Cloudify deployment duration:'%s'" % duration)
- set_result("orchestrator", duration, "")
-
- # ############### CLEARWATER INITIALISATION ################
-
- cw = clearwater(CW_INPUTS, cfy, logger)
-
- logger.info("Collect flavor id for all clearwater vm")
- nova = nvclient.Client("2", **nv_creds)
-
- flavor_name = "m1.small"
- flavor_id = os_utils.get_flavor_id(nova, flavor_name)
-    if 'ram_min' in CW_REQUIERMENTS and flavor_id == '':
-        flavor_id = os_utils.get_flavor_id_by_ram_range(
-            nova, CW_REQUIERMENTS['ram_min'], 4500)
-
- if flavor_id == '':
-        logger.error(
-            "Failed to find %s flavor. Falling back to the default "
-            "RAM range (4000-8196 MB)." % flavor_name)
-        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)
-
- if flavor_id == '':
- step_failure(
- "vIMS", "Failed to find required flavor for this deployment")
-
- cw.set_flavor_id(flavor_id)
-
- image_name = "ubuntu_14.04"
- image_id = os_utils.get_image_id(glance, image_name)
-    if 'os_image' in CW_REQUIERMENTS:
-        image_id = os_utils.get_image_id(
-            glance, CW_REQUIERMENTS['os_image'])
-
- if image_id == '':
- step_failure(
- "vIMS",
- "Error : Failed to find required OS image for cloudify manager")
-
- cw.set_image_id(image_id)
-
- ext_net = os_utils.get_external_net(neutron)
- if not ext_net:
- step_failure("vIMS", "Failed to get external network")
-
- cw.set_external_network_name(ext_net)
-
- # ############### CLEARWATER DEPLOYMENT ################
-
- start_time_ts = time.time()
- end_time_ts = start_time_ts
- logger.info("vIMS VNF deployment Start Time:'%s'" % (
- datetime.datetime.fromtimestamp(start_time_ts).strftime(
- '%Y-%m-%d %H:%M:%S')))
-
- error = cw.deploy_vnf(CW_BLUEPRINT)
- if error:
- step_failure("vIMS", error)
-
- end_time_ts = time.time()
- duration = round(end_time_ts - start_time_ts, 1)
- logger.info("vIMS VNF deployment duration:'%s'" % duration)
- set_result("vIMS", duration, "")
-
- # ############### CLEARWATER TEST ################
-
- test_clearwater()
-
- # ########## CLEARWATER UNDEPLOYMENT ############
-
- cw.undeploy_vnf()
-
- # ########### CLOUDIFY UNDEPLOYMENT #############
-
- cfy.undeploy_manager()
-
- # ############## GENERAL CLEANUP ################
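-    # When the noclean option is set, the test tenant and user are kept
-    # for post-mortem debugging.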
- if args.noclean:
- exit(0)
-
- ks_creds = os_utils.get_credentials("keystone")
-
- keystone = ksclient.Client(**ks_creds)
-
- logger.info("Removing %s tenant .." % CFY_INPUTS['keystone_tenant_name'])
- tenant_id = os_utils.get_tenant_id(
- keystone, CFY_INPUTS['keystone_tenant_name'])
- if tenant_id == '':
- logger.error("Error : Failed to get id of %s tenant" %
- CFY_INPUTS['keystone_tenant_name'])
- else:
- if not os_utils.delete_tenant(keystone, tenant_id):
- logger.error("Error : Failed to remove %s tenant" %
- CFY_INPUTS['keystone_tenant_name'])
-
- logger.info("Removing %s user .." % CFY_INPUTS['keystone_username'])
- user_id = os_utils.get_user_id(
- keystone, CFY_INPUTS['keystone_username'])
- if user_id == '':
- logger.error("Error : Failed to get id of %s user" %
- CFY_INPUTS['keystone_username'])
- else:
- if not os_utils.delete_user(keystone, user_id):
- logger.error("Error : Failed to remove %s user" %
- CFY_INPUTS['keystone_username'])
-
-
-if __name__ == '__main__':
- main()
diff --git a/testcases/vnf/vRNC/parser.py b/testcases/vnf/vRNC/parser.py
deleted file mode 100755
index 0381fd648..000000000
--- a/testcases/vnf/vRNC/parser.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 ZTE Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import argparse
-import time
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
-
-PARSER_REPO = \
- functest_utils.get_functest_config('general.directories.dir_repo_parser')
-RESULTS_DIR = \
- functest_utils.get_functest_config('general.directories.dir_results')
-
-logger = ft_logger.Logger("parser").getLogger()
-
-
-def main():
- project = 'parser'
- case_name = 'parser-basics'
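-    # Delegate to the parser project's own test driver in its repo checkout.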
- cmd = 'cd %s/tests && ./functest_run.sh' % PARSER_REPO
-
- start_time = time.time()
- log_file = RESULTS_DIR + "/parser.log"
- ret = functest_utils.execute_command(cmd,
- info=True,
- output_file=log_file)
- stop_time = time.time()
-
- status, details = functest_utils.check_test_result(project,
- ret,
- start_time,
- stop_time)
-
- functest_utils.logger_test_results(project,
- case_name,
- status,
- details)
-
- if args.report:
- logger.debug("Report Parser Results to DB......")
- functest_utils.push_results_to_db(project,
- case_name,
- start_time,
- stop_time,
- status,
- details)
- exit(ret)
-
-if __name__ == '__main__':
- main()