Diffstat (limited to 'functest')
-rwxr-xr-x  functest/ci/check_os.sh | 63
-rw-r--r--  functest/ci/config_aarch64_patch.yaml | 20
-rwxr-xr-x  functest/ci/config_functest.yaml | 11
-rwxr-xr-x  functest/ci/config_patch.yaml | 6
-rw-r--r--  functest/ci/installer_params.yaml | 16
-rwxr-xr-x  functest/ci/prepare_env.py | 140
-rw-r--r--  functest/ci/rally_aarch64_patch.conf | 5
-rwxr-xr-x  functest/ci/run_tests.py | 2
-rwxr-xr-x  functest/ci/testcases.yaml | 130
-rwxr-xr-x  functest/ci/tier_builder.py | 2
-rwxr-xr-x  functest/ci/tier_handler.py | 6
-rw-r--r--  functest/cli/commands/cli_env.py | 2
-rw-r--r--  functest/cli/commands/cli_os.py | 2
-rw-r--r--  functest/cli/commands/cli_testcase.py | 2
-rw-r--r--  functest/cli/commands/cli_tier.py | 2
-rw-r--r--  functest/core/feature_base.py | 10
-rw-r--r--  functest/core/vnf_base.py | 15
-rw-r--r--  functest/opnfv_tests/features/barometer.py | 28
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template | 2
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template | 2
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt | 286
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt | 4
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py | 15
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_base.py | 2
-rwxr-xr-x  functest/opnfv_tests/openstack/vping/vping_ssh.py | 6
-rwxr-xr-x  functest/opnfv_tests/sdn/odl/odl.py | 2
-rw-r--r--  functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py | 14
-rw-r--r--  functest/opnfv_tests/sdn/onos/teston/adapters/foundation.py | 2
-rw-r--r--  functest/opnfv_tests/vnf/ims/clearwater.py | 2
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims.py | 35
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims.yaml | 4
-rw-r--r--  functest/opnfv_tests/vnf/ims/opera_ims.py | 475
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_ims.py | 465
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_ims.yaml | 7
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestrator_cloudify.py | 9
-rwxr-xr-x  functest/opnfv_tests/vnf/router/__init__.py | 0
-rwxr-xr-x  functest/opnfv_tests/vnf/router/vyos_vrouter.py | 33
-rw-r--r--  functest/tests/unit/opnfv_tests/vnf/__init__.py | 0
-rw-r--r--  functest/tests/unit/opnfv_tests/vnf/ims/__init__.py | 0
-rw-r--r--  functest/tests/unit/opnfv_tests/vnf/ims/test_clearwater.py | 54
-rw-r--r--  functest/tests/unit/opnfv_tests/vnf/ims/test_cloudify_ims.py | 542
-rw-r--r--  functest/tests/unit/opnfv_tests/vnf/ims/test_orchestrator_cloudify.py | 122
-rw-r--r--  functest/tests/unit/utils/test_openstack_utils.py | 58
-rwxr-xr-x [-rw-r--r--]  functest/utils/config.py | 20
-rw-r--r--  functest/utils/env.py | 2
-rwxr-xr-x  functest/utils/functest_logger.py | 4
-rw-r--r--  functest/utils/functest_utils.py | 9
-rw-r--r--  functest/utils/openstack_tacker.py | 72
-rwxr-xr-x  functest/utils/openstack_utils.py | 87
49 files changed, 2277 insertions(+), 520 deletions(-)
diff --git a/functest/ci/check_os.sh b/functest/ci/check_os.sh
index b875a173..2c5c021c 100755
--- a/functest/ci/check_os.sh
+++ b/functest/ci/check_os.sh
@@ -6,6 +6,16 @@
# jose.lausuch@ericsson.com
#
+declare -A service_cmd_array
+service_cmd_array['nova']='openstack server list'
+service_cmd_array['neutron']='openstack network list'
+service_cmd_array['keystone']='openstack endpoint list'
+service_cmd_array['cinder']='openstack volume list'
+service_cmd_array['glance']='openstack image list'
+
+MANDATORY_SERVICES='nova neutron keystone glance'
+OPTIONAL_SERVICES='cinder'
+
verify_connectivity() {
for i in $(seq 0 9); do
if echo "test" | nc -v -w 10 $1 $2 &>/dev/null; then
@@ -16,6 +26,34 @@ verify_connectivity() {
return 1
}
+check_service() {
+ local service cmd
+ service=$1
+ cmd=${service_cmd_array[$service]}
+ if [ -z "$2" ]; then
+ required='false'
+ else
+ required=$2
+ fi
+ echo ">>Checking ${service} service..."
+ if ! openstack service list | grep -i ${service} > /dev/null; then
+ if [ "$required" == 'false' ]; then
+ echo "WARN: Optional Service ${service} is not enabled!"
+ return
+ else
+ echo "ERROR: Required Service ${service} is not enabled!"
+ exit 1
+ fi
+ fi
+ $cmd &>/dev/null
+ result=$?
+ if [ $result -ne 0 ]; then
+ echo "ERROR: Failed execution $cmd. The $service does not seem to be working."
+ exit 1
+ else
+ echo " ...OK"
+ fi
+}
if [ -z $OS_AUTH_URL ];then
echo "ERROR: OS_AUTH_URL environment variable missing... Have you sourced the OpenStack credentials?"
@@ -56,25 +94,16 @@ fi
echo " ...OK"
-echo "Checking OpenStack basic services:"
-commands=('openstack endpoint list' 'openstack server list' 'openstack network list' \
- 'openstack image list' 'openstack volume list')
-for cmd in "${commands[@]}"
-do
- service=$(echo $cmd | awk '{print $1, $2}')
- echo ">>Checking $service service..."
- $cmd &>/dev/null
- result=$?
- if [ $result -ne 0 ];
- then
- echo "ERROR: Failed execution $cmd. The $service does not seem to be working."
- exit 1
- else
- echo " ...OK"
- fi
+echo "Checking Required OpenStack services:"
+for service in $MANDATORY_SERVICES; do
+ check_service $service "true"
done
+echo "Required OpenStack services are OK."
-echo "OpenStack services are OK."
+echo "Checking Optional OpenStack services:"
+for service in $OPTIONAL_SERVICES; do
+ check_service $service
+done
echo "Checking External network..."
networks=($(neutron net-list -F id | tail -n +4 | head -n -1 | awk '{print $2}'))
diff --git a/functest/ci/config_aarch64_patch.yaml b/functest/ci/config_aarch64_patch.yaml
new file mode 100644
index 00000000..9a345e3f
--- /dev/null
+++ b/functest/ci/config_aarch64_patch.yaml
@@ -0,0 +1,20 @@
+os:
+ general:
+ openstack:
+ image_name: TestVM
+ image_file_name: cirros-d161201-aarch64-disk.img
+ image_password: gocubsgo
+
+ snaps_simple_healthcheck:
+ disk_image: /home/opnfv/functest/data/cirros-d161201-aarch64-disk.img
+ kernel_image: /home/opnfv/functest/data/cirros-d161201-aarch64-kernel
+ ramdisk_image: /home/opnfv/functest/data/cirros-d161201-aarch64-initramfs
+ extra_properties:
+ os_command_line: root=/dev/vdb1 rw rootwait console=tty0 console=ttyS0 console=ttyAMA0
+ hw_video_model: vga
+
+ vping:
+ image_name: TestVM
+
+ doctor:
+ image_name: TestVM
diff --git a/functest/ci/config_functest.yaml b/functest/ci/config_functest.yaml
index 8fa4bd34..1120e7e1 100755
--- a/functest/ci/config_functest.yaml
+++ b/functest/ci/config_functest.yaml
@@ -21,6 +21,7 @@ general:
dir_repo_onos: /home/opnfv/repos/onos
repo_promise: /home/opnfv/repos/promise
repo_netready: /home/opnfv/repos/netready
+ repo_barometer: /home/opnfv/repos/barometer
repo_doctor: /home/opnfv/repos/doctor
repo_copper: /home/opnfv/repos/copper
dir_repo_ovno: /home/opnfv/repos/ovno
@@ -28,6 +29,7 @@ general:
repo_domino: /home/opnfv/repos/domino
repo_snaps: /home/opnfv/repos/snaps
repo_securityscan: /home/opnfv/repos/securityscanning
+ repo_vrouter: /home/opnfv/repos/vrouter
functest: /home/opnfv/functest
functest_test: /home/opnfv/repos/functest/functest/opnfv_tests
results: /home/opnfv/functest/results
@@ -41,9 +43,11 @@ general:
creds: /home/opnfv/functest/conf/openstack.creds
snapshot_file: /home/opnfv/functest/conf/openstack_snapshot.yaml
- image_name: Cirros-0.3.4
- image_file_name: cirros-0.3.4-x86_64-disk.img
+ image_name: Cirros-0.3.5
+ image_file_name: cirros-0.3.5-x86_64-disk.img
image_disk_format: qcow2
+ image_username: cirros
+ image_password: cubswin:)
flavor_name: opnfv_flavor
flavor_ram: 512
@@ -63,7 +67,7 @@ general:
testcases_yaml: /home/opnfv/repos/functest/functest/ci/testcases.yaml
healthcheck:
- disk_image: /home/opnfv/functest/data/cirros-0.3.4-x86_64-disk.img
+ disk_image: /home/opnfv/functest/data/cirros-0.3.5-x86_64-disk.img
disk_format: qcow2
wait_time: 60
@@ -130,6 +134,7 @@ vnf:
orchestra_ims:
tenant_name: orchestra_ims
tenant_description: ims deployed with openbaton
+ config: orchestra_ims.yaml
opera_ims:
tenant_name: opera_ims
tenant_description: ims deployed with open-o
diff --git a/functest/ci/config_patch.yaml b/functest/ci/config_patch.yaml
index 46064a07..d984a3f4 100755
--- a/functest/ci/config_patch.yaml
+++ b/functest/ci/config_patch.yaml
@@ -1,12 +1,12 @@
lxd:
general:
openstack:
- image_name: Cirros-0.3.4
- image_file_name: cirros-0.3.4-x86_64-lxc.tar.gz
+ image_name: Cirros-0.3.5
+ image_file_name: cirros-0.3.5-x86_64-lxc.tar.gz
image_disk_format: raw
healthcheck:
- disk_image: /home/opnfv/functest/data/cirros-0.3.4-x86_64-lxc.tar.gz
+ disk_image: /home/opnfv/functest/data/cirros-0.3.5-x86_64-lxc.tar.gz
disk_format: raw
fdio:
general:
diff --git a/functest/ci/installer_params.yaml b/functest/ci/installer_params.yaml
new file mode 100644
index 00000000..26aff9bb
--- /dev/null
+++ b/functest/ci/installer_params.yaml
@@ -0,0 +1,16 @@
+apex:
+ ip: ''
+ user: 'stack'
+ pkey: '/root/.ssh/id_rsa'
+# compass:
+# ip: '192.168.200.2'
+# user: 'root'
+# password: 'root'
+fuel:
+ ip: '10.20.0.2'
+ user: 'root'
+ password: 'r00tme'
+# joid:
+# ip: ''
+# user: ''
+# password: ''
diff --git a/functest/ci/prepare_env.py b/functest/ci/prepare_env.py
index 6b24fe08..724ea14d 100755
--- a/functest/ci/prepare_env.py
+++ b/functest/ci/prepare_env.py
@@ -1,48 +1,49 @@
#!/usr/bin/env python
#
-# Author: Jose Lausuch (jose.lausuch@ericsson.com)
-#
-# Installs the Functest framework within the Docker container
-# and run the tests automatically
-#
-#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
-
import argparse
import json
import os
import re
import subprocess
import sys
+import fileinput
import yaml
-from opnfv.utils import constants as opnfv_constants
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST
+from opnfv.utils import constants as opnfv_constants
+from opnfv.deployment import factory
+
actions = ['start', 'check']
""" logging configuration """
logger = ft_logger.Logger("prepare_env").getLogger()
-
+handler = None
+# set the architecture to default
+pod_arch = None
+arch_filter = ['aarch64']
CONFIG_FUNCTEST_PATH = CONST.CONFIG_FUNCTEST_YAML
CONFIG_PATCH_PATH = os.path.join(os.path.dirname(
CONFIG_FUNCTEST_PATH), "config_patch.yaml")
-
-with open(CONFIG_PATCH_PATH) as f:
- functest_patch_yaml = yaml.safe_load(f)
+CONFIG_AARCH64_PATCH_PATH = os.path.join(os.path.dirname(
+ CONFIG_FUNCTEST_PATH), "config_aarch64_patch.yaml")
+RALLY_CONF_PATH = os.path.join("/etc/rally/rally.conf")
+RALLY_AARCH64_PATCH_PATH = os.path.join(os.path.dirname(
+ CONFIG_FUNCTEST_PATH), "rally_aarch64_patch.conf")
-class PrepareEnvParser():
+class PrepareEnvParser(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
@@ -107,6 +108,38 @@ def check_env_variables():
logger.info(" IS_CI_RUN=%s" % CONST.IS_CI_RUN)
+def get_deployment_handler():
+ global handler
+ global pod_arch
+
+ installer_params_yaml = os.path.join(CONST.dir_repo_functest,
+ 'functest/ci/installer_params.yaml')
+ if (CONST.INSTALLER_IP and CONST.INSTALLER_TYPE and
+ CONST.INSTALLER_TYPE in opnfv_constants.INSTALLERS):
+ try:
+ installer_params = ft_utils.get_parameter_from_yaml(
+ CONST.INSTALLER_TYPE, installer_params_yaml)
+ except ValueError as e:
+ logger.debug('Printing deployment info is not supported for %s' %
+ CONST.INSTALLER_TYPE)
+ logger.debug(e)
+ else:
+ user = installer_params.get('user', None)
+ password = installer_params.get('password', None)
+ pkey = installer_params.get('pkey', None)
+ try:
+ handler = factory.Factory.get_handler(
+ installer=CONST.INSTALLER_TYPE,
+ installer_ip=CONST.INSTALLER_IP,
+ installer_user=user,
+ installer_pwd=password,
+ pkey_file=pkey)
+ if handler:
+ pod_arch = handler.get_arch()
+ except Exception as e:
+ logger.debug("Cannot get deployment information. %s" % e)
+
+
def create_directories():
print_separator()
logger.info("Creating needed directories...")
@@ -168,8 +201,7 @@ def source_rc_file():
raise Exception("The file %s is empty." % CONST.openstack_creds)
logger.info("Sourcing the OpenStack RC file...")
- os_utils.source_credentials(
- CONST.openstack_creds)
+ os_utils.source_credentials(CONST.openstack_creds)
for key, value in os.environ.iteritems():
if re.search("OS_", key):
if key == 'OS_AUTH_URL':
@@ -183,11 +215,22 @@ def source_rc_file():
def patch_config_file():
+ patch_file(CONFIG_PATCH_PATH)
+
+ if pod_arch and pod_arch in arch_filter:
+ patch_file(CONFIG_AARCH64_PATCH_PATH)
+
+
+def patch_file(patch_file_path):
+ logger.debug('Updating file: %s', patch_file_path)
+ with open(patch_file_path) as f:
+ patch_file = yaml.safe_load(f)
+
updated = False
- for key in functest_patch_yaml:
+ for key in patch_file:
if key in CONST.DEPLOY_SCENARIO:
new_functest_yaml = dict(ft_utils.merge_dicts(
- ft_utils.get_functest_yaml(), functest_patch_yaml[key]))
+ ft_utils.get_functest_yaml(), patch_file[key]))
updated = True
if updated:
@@ -215,6 +258,17 @@ def verify_deployment():
def install_rally():
print_separator()
+
+ if pod_arch and pod_arch in arch_filter:
+ logger.info("Apply aarch64 specific to rally config...")
+ with open(RALLY_AARCH64_PATCH_PATH, "r") as f:
+ rally_patch_conf = f.read()
+
+ for line in fileinput.input(RALLY_CONF_PATH, inplace=1):
+ print line,
+ if "cirros|testvm" in line:
+ print rally_patch_conf
+
logger.info("Creating Rally environment...")
cmd = "rally deployment destroy opnfv-rally"
@@ -222,20 +276,19 @@ def install_rally():
"Deployment %s does not exist."
% CONST.rally_deployment_name),
verbose=False)
+
rally_conf = os_utils.get_credentials_for_rally()
with open('rally_conf.json', 'w') as fp:
json.dump(rally_conf, fp)
cmd = ("rally deployment create "
- "--file=rally_conf.json --name={}"
+ "--file=rally_conf.json --name={0}"
.format(CONST.rally_deployment_name))
- ft_utils.execute_command(cmd,
- error_msg=("Problem while creating "
- "Rally deployment"))
+ error_msg = "Problem while creating Rally deployment"
+ ft_utils.execute_command_raise(cmd, error_msg=error_msg)
cmd = "rally deployment check"
- ft_utils.execute_command(cmd,
- error_msg=("OpenStack not responding or "
- "faulty Rally deployment."))
+ error_msg = "OpenStack not responding or faulty Rally deployment."
+ ft_utils.execute_command_raise(cmd, error_msg=error_msg)
cmd = "rally deployment list"
ft_utils.execute_command(cmd,
@@ -250,19 +303,30 @@ def install_rally():
def install_tempest():
logger.info("Installing tempest from existing repo...")
- cmd = ("rally verify create-verifier --source {0} "
- "--name {1} --type tempest"
- .format(CONST.dir_repo_tempest, CONST.tempest_deployment_name))
- ft_utils.execute_command(cmd,
- error_msg="Problem while installing Tempest.")
+ cmd = ("rally verify list-verifiers | "
+ "grep '{0}' | wc -l".format(CONST.tempest_deployment_name))
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
+ while p.poll() is None:
+ line = p.stdout.readline().rstrip()
+ if str(line) == '0':
+ logger.debug("Tempest %s does not exist" %
+ CONST.tempest_deployment_name)
+ cmd = ("rally verify create-verifier --source {0} "
+ "--name {1} --type tempest"
+ .format(CONST.dir_repo_tempest,
+ CONST.tempest_deployment_name))
+ error_msg = "Problem while installing Tempest."
+ ft_utils.execute_command_raise(cmd, error_msg=error_msg)
def create_flavor():
- os_utils.get_or_create_flavor('m1.tiny',
- '512',
- '1',
- '1',
- public=True)
+ _, flavor_id = os_utils.get_or_create_flavor('m1.tiny',
+ '512',
+ '1',
+ '1',
+ public=True)
+ if flavor_id is None:
+ raise Exception('Failed to create flavor')
def check_environment():
@@ -278,6 +342,12 @@ def check_environment():
logger.info("Functest environment is installed.")
+def print_deployment_info():
+ if handler:
+ logger.info('\n\nDeployment information:\n%s' %
+ handler.get_deployment_info())
+
+
def main(**kwargs):
try:
if not (kwargs['action'] in actions):
@@ -286,6 +356,7 @@ def main(**kwargs):
elif kwargs['action'] == "start":
logger.info("######### Preparing Functest environment #########\n")
check_env_variables()
+ get_deployment_handler()
create_directories()
source_rc_file()
patch_config_file()
@@ -296,6 +367,7 @@ def main(**kwargs):
with open(CONST.env_active, "w") as env_file:
env_file.write("1")
check_environment()
+ print_deployment_info()
elif kwargs['action'] == "check":
check_environment()
except Exception as e:
diff --git a/functest/ci/rally_aarch64_patch.conf b/functest/ci/rally_aarch64_patch.conf
new file mode 100644
index 00000000..a49588bf
--- /dev/null
+++ b/functest/ci/rally_aarch64_patch.conf
@@ -0,0 +1,5 @@
+img_name_regex = ^TestVM$
+img_url = http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-disk.img
+flavor_ref_ram = 128
+flavor_ref_alt_ram = 256
+heat_instance_type_ram = 128
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 93518de0..f920e70d 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -48,7 +48,7 @@ class BlockingTestFailed(Exception):
pass
-class RunTestsParser():
+class RunTestsParser(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index 5bb0a381..598da396 100755
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -8,26 +8,14 @@ tiers:
operations in the VIM.
testcases:
-
- name: healthcheck
- criteria: 'status == "PASS"'
- blocking: true
- description: >-
- This test case verifies the basic OpenStack services like
- Keystone, Glance, Cinder, Neutron and Nova.
-
- dependencies:
- installer: ''
- scenario: '^((?!lxd).)*$'
- -
name: snaps_health_check
criteria: 'status == "PASS"'
- blocking: false
+ blocking: true
description: >-
This test case creates executes the SimpleHealthCheck
Python test class which creates an, image, flavor, network,
and Cirros VM instance and observes the console output to
validate the single port obtains the correct IP address.
-
dependencies:
installer: ''
scenario: '^((?!lxd).)*$'
@@ -325,6 +313,18 @@ tiers:
module: 'functest.opnfv_tests.vnf.rnc.parser'
class: 'Parser'
-
+ name: domino
+ criteria: 'status == "PASS"'
+ blocking: false
+ description: >-
+ Test suite from Domino project.
+ dependencies:
+ installer: ''
+ scenario: ''
+ run:
+ module: 'functest.opnfv_tests.features.domino'
+ class: 'Domino'
+ -
name: orchestra
criteria: 'ret == 0'
blocking: false
@@ -348,46 +348,89 @@ tiers:
run:
module: 'functest.opnfv_tests.features.netready'
class: 'GluonVping'
+ -
+ name: barometer
+ criteria: 'status == "PASS"'
+ blocking: false
+ description: >-
+ Test suite for the Barometer project. Separate tests verify the
+ proper configuration and functionality of the following
+ collectd plugins Ceilometer, Hugepages, Memory RAS (mcelog),
+ and OVS Events
+ dependencies:
+ installer: 'fuel'
+ scenario: 'kvm_ovs_dpdk_bar'
+ run:
+ module: 'functest.opnfv_tests.features.barometer'
+ class: 'BarometerCollectd'
-
name: components
order: 3
- ci_loop: 'weekly'
+ ci_loop: 'daily'
description : >-
Extensive testing of OpenStack API.
testcases:
+# -
+# name: tempest_full_parallel
+# criteria: 'success_rate >= 80%'
+# blocking: false
+# description: >-
+# The list of test cases is generated by
+# Tempest automatically and depends on the parameters of
+# the OpenStack deplopyment.
+# dependencies:
+# installer: '^((?!netvirt).)*$'
+# scenario: ''
+# run:
+# module: 'functest.opnfv_tests.openstack.tempest.tempest'
+# class: 'TempestFullParallel'
-
- name: tempest_full_parallel
- criteria: 'success_rate >= 80%'
+ name: tempest_defcore
+ criteria: 'success_rate == 100%'
blocking: false
description: >-
- The list of test cases is generated by
- Tempest automatically and depends on the parameters of
- the OpenStack deplopyment.
+ This is the set of Tempest test cases created by OpenStack
+ Interop Working Group for certification purposes.
dependencies:
- installer: '^((?!netvirt).)*$'
- scenario: ''
+ installer: ''
+ scenario: 'nosdn-nofeature-ha'
run:
module: 'functest.opnfv_tests.openstack.tempest.tempest'
- class: 'TempestFullParallel'
-
+ class: 'TempestDefcore'
-
- name: rally_full
- criteria: 'success_rate >= 90%'
+ name: tempest_custom
+ criteria: 'success_rate == 100%'
blocking: false
description: >-
- This test case runs the full suite of scenarios of the OpenStack
- Rally suite using several threads and iterations.
+ The test case allows running a customized list of tempest
+ test cases defined in a file under
+ <dir_functest_repo>/functest/opnfv_tests/openstack/
+ /tempest/custom_tests/test_list.txt
+ The file is empty and can be customized with the desired tests.
dependencies:
- installer: '^((?!netvirt).)*$'
- scenario: ''
+ installer: 'unknown'
+ scenario: 'unknown'
run:
- module: 'functest.opnfv_tests.openstack.rally.rally'
- class: 'RallyFull'
+ module: 'functest.opnfv_tests.openstack.tempest.tempest'
+ class: 'TempestCustom'
+# -
+# name: rally_full
+# criteria: 'success_rate >= 90%'
+# blocking: false
+# description: >-
+# This test case runs the full suite of scenarios of the OpenStack
+# Rally suite using several threads and iterations.
+# dependencies:
+# installer: '^((?!netvirt).)*$'
+# scenario: ''
+# run:
+# module: 'functest.opnfv_tests.openstack.rally.rally'
+# class: 'RallyFull'
-
name: vnf
order: 4
- ci_loop: '(daily)|(weekly)'
+ ci_loop: 'weekly'
description : >-
Collection of VNF test cases.
testcases:
@@ -400,7 +443,7 @@ tiers:
using the Cloudify orchestrator. It also runs some signaling traffic.
dependencies:
installer: ''
- scenario: 'nosdn-nofeature'
+ scenario: '(ocl)|(nosdn)|^(os-odl)((?!bgpvpn).)*$'
run:
module: 'functest.opnfv_tests.vnf.ims.cloudify_ims'
class: 'ImsVnf'
@@ -411,8 +454,8 @@ tiers:
description: >-
Test suite from Parser project.
dependencies:
- installer: 'unknown'
- scenario: 'unknown'
+ installer: ''
+ scenario: ''
run:
module: 'functest.opnfv_tests.vnf.aaa.aaa'
class: 'AaaVnf'
@@ -437,8 +480,8 @@ tiers:
description: >-
VNF deployment with OpenBaton (Orchestra)
dependencies:
- installer: 'unknown'
- scenario: 'unknown'
+ installer: ''
+ scenario: ''
run:
module: 'functest.opnfv_tests.vnf.ims.orchestra_ims'
class: 'ImsVnf'
@@ -455,3 +498,16 @@ tiers:
run:
module: 'functest.opnfv_tests.vnf.ims.opera_ims'
class: 'ImsVnf'
+
+ -
+ name: vyos_vrouter
+ criteria: 'status == "PASS"'
+ blocking: false
+ description: >-
+ This test case is vRouter testing.
+ dependencies:
+ installer: 'fuel'
+ scenario: 'nosdn-nofeature'
+ run:
+ module: 'functest.opnfv_tests.vnf.router.vyos_vrouter'
+ class: 'VrouterVnf'
diff --git a/functest/ci/tier_builder.py b/functest/ci/tier_builder.py
index e1c3e49e..dae7c73e 100755
--- a/functest/ci/tier_builder.py
+++ b/functest/ci/tier_builder.py
@@ -11,7 +11,7 @@ import tier_handler as th
import yaml
-class TierBuilder:
+class TierBuilder(object):
def __init__(self, ci_installer, ci_scenario, testcases_file):
self.ci_installer = ci_installer
diff --git a/functest/ci/tier_handler.py b/functest/ci/tier_handler.py
index 1eadfba5..127986bf 100755
--- a/functest/ci/tier_handler.py
+++ b/functest/ci/tier_handler.py
@@ -28,7 +28,7 @@ def split_text(text, max_len):
return lines
-class Tier:
+class Tier(object):
def __init__(self, name, order, ci_loop, description=""):
self.tests_array = []
@@ -102,7 +102,7 @@ class Tier:
return out
-class TestCase:
+class TestCase(object):
def __init__(self, name, dependency, criteria, blocking, description=""):
self.name = name
@@ -160,7 +160,7 @@ class TestCase:
return out
-class Dependency:
+class Dependency(object):
def __init__(self, installer, scenario):
self.installer = installer
diff --git a/functest/cli/commands/cli_env.py b/functest/cli/commands/cli_env.py
index 9423631b..14ad01bf 100644
--- a/functest/cli/commands/cli_env.py
+++ b/functest/cli/commands/cli_env.py
@@ -16,7 +16,7 @@ from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
-class CliEnv:
+class CliEnv(object):
def __init__(self):
pass
diff --git a/functest/cli/commands/cli_os.py b/functest/cli/commands/cli_os.py
index aeb34974..f85f4041 100644
--- a/functest/cli/commands/cli_os.py
+++ b/functest/cli/commands/cli_os.py
@@ -18,7 +18,7 @@ import functest.utils.openstack_clean as os_clean
import functest.utils.openstack_snapshot as os_snapshot
-class CliOpenStack:
+class CliOpenStack(object):
def __init__(self):
self.os_auth_url = CONST.OS_AUTH_URL
diff --git a/functest/cli/commands/cli_testcase.py b/functest/cli/commands/cli_testcase.py
index b6566245..6644a0c2 100644
--- a/functest/cli/commands/cli_testcase.py
+++ b/functest/cli/commands/cli_testcase.py
@@ -19,7 +19,7 @@ import functest.utils.functest_utils as ft_utils
import functest.utils.functest_vacation as vacation
-class CliTestcase:
+class CliTestcase(object):
def __init__(self):
self.tiers = tb.TierBuilder(CONST.INSTALLER_TYPE,
diff --git a/functest/cli/commands/cli_tier.py b/functest/cli/commands/cli_tier.py
index b9d25b6d..012b11d0 100644
--- a/functest/cli/commands/cli_tier.py
+++ b/functest/cli/commands/cli_tier.py
@@ -18,7 +18,7 @@ from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
-class CliTier:
+class CliTier(object):
def __init__(self):
self.tiers = tb.TierBuilder(CONST.INSTALLER_TYPE,
diff --git a/functest/core/feature_base.py b/functest/core/feature_base.py
index fe9a9998..2bd1ec83 100644
--- a/functest/core/feature_base.py
+++ b/functest/core/feature_base.py
@@ -7,6 +7,7 @@ from functest.utils.constants import CONST
class FeatureBase(base.TestcaseBase):
+
def __init__(self, project='functest', case='', repo='', cmd=''):
super(FeatureBase, self).__init__()
self.project_name = project
@@ -19,7 +20,7 @@ class FeatureBase(base.TestcaseBase):
def run(self, **kwargs):
self.prepare()
self.start_time = time.time()
- ret = ft_utils.execute_command(self.cmd, output_file=self.result_file)
+ ret = self.execute()
self.stop_time = time.time()
self.post()
self.parse_results(ret)
@@ -27,6 +28,13 @@ class FeatureBase(base.TestcaseBase):
self.logger.info("Test result is stored in '%s'" % self.result_file)
return base.TestcaseBase.EX_OK
+ def execute(self):
+ '''
+ Executer method that can be overwritten
+ By default it executes a shell command.
+ '''
+ return ft_utils.execute_command(self.cmd, output_file=self.result_file)
+
def prepare(self, **kwargs):
pass
diff --git a/functest/core/vnf_base.py b/functest/core/vnf_base.py
index 07b64fd0..9438dca1 100644
--- a/functest/core/vnf_base.py
+++ b/functest/core/vnf_base.py
@@ -111,9 +111,9 @@ class VnfOnBoardingBase(base.TestcaseBase):
self.keystone_client = os_utils.get_keystone_client()
self.logger.info("Prepare OpenStack plateform(create tenant and user)")
- user_id = os_utils.get_user_id(self.keystone_client,
- self.creds['username'])
- if user_id == '':
+ admin_user_id = os_utils.get_user_id(self.keystone_client,
+ self.creds['username'])
+ if admin_user_id == '':
self.step_failure("Failed to get id of " +
self.creds['username'])
@@ -133,7 +133,7 @@ class VnfOnBoardingBase(base.TestcaseBase):
self.logger.error("Failed to get id for %s role" % role_name)
self.step_failure("Failed to get role id of " + role_name)
- if not os_utils.add_role_user(self.keystone_client, user_id,
+ if not os_utils.add_role_user(self.keystone_client, admin_user_id,
role_id, tenant_id):
self.logger.error("Failed to add %s on tenant" %
self.creds['username'])
@@ -149,6 +149,13 @@ class VnfOnBoardingBase(base.TestcaseBase):
self.logger.error("Failed to create %s user" % self.tenant_name)
self.step_failure("Failed to create user ")
+ if not os_utils.add_role_user(self.keystone_client, user_id,
+ role_id, tenant_id):
+ self.logger.error("Failed to add %s on tenant" %
+ self.tenant_name)
+ self.step_failure("Failed to add %s on tenant" %
+ self.tenant_name)
+
self.logger.info("Update OpenStack creds informations")
self.admin_creds = self.creds.copy()
self.admin_creds.update({
diff --git a/functest/opnfv_tests/features/barometer.py b/functest/opnfv_tests/features/barometer.py
new file mode 100644
index 00000000..aec2bce5
--- /dev/null
+++ b/functest/opnfv_tests/features/barometer.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+
+import functest.core.feature_base as base
+import functest.utils.functest_logger as ft_logger
+
+from baro_tests import collectd
+
+
+class BarometerCollectd(base.FeatureBase):
+ '''
+ Class for executing barometercollectd testcase.
+ '''
+
+ def __init__(self):
+ super(BarometerCollectd, self).__init__(project='barometer',
+ case='barometercollectd',
+ repo='dir_repo_barometer')
+ self.logger = ft_logger.Logger("BarometerCollectd").getLogger()
+
+ def execute(self):
+ return collectd.main(self.logger)
diff --git a/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template b/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template
index 909f45d2..ed5e61fe 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template
+++ b/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template
@@ -7,7 +7,7 @@ parameters:
default: public
image:
type: string
- default: cirros-0.3.4-x86_64-uec
+ default: cirros-0.3.5-x86_64-uec
flavor:
type: string
default: m1.tiny
diff --git a/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template b/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template
index 826ca9da..116b5bb6 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template
+++ b/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template
@@ -4,7 +4,7 @@ parameters:
# set all correct defaults for parameters before launch test
image:
type: string
- default: cirros-0.3.4-x86_64-uec
+ default: cirros-0.3.5-x86_64-uec
flavor:
type: string
default: m1.tiny
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt
index bb1d172d..1456db87 100644
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt
@@ -1,35 +1,19 @@
-# Set of DefCore tempest test cases (see http://www.openstack.org/brand/interop)
-# This approved version (2016.01) is valid for Juno, Kilo, and Liberty releases of OpenStack
-# The list is stored at http://git.openstack.org/cgit/openstack/defcore/plain/2016.01/2016.01.required.txt
-tempest.api.compute.images.test_images.ImagesTestJSON.test_delete_saving_image[id-aa06b52b-2db5-4807-b218-9441f75d74e3]
+# Set of DefCore tempest test cases not flagged and required. It only contains OpenStack core (no object storage)
+# The approved guidelines (2016.08) are valid for Kilo, Liberty, Mitaka and Newton releases of OpenStack
+# The list can be generated using the Rest API from RefStack project:
+# https://refstack.openstack.org/api/v1/guidelines/2016.08/tests?target=compute&type=required&alias=true&flag=false
tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314]
tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_changes_since[id-18bac3ae-da27-436c-92a9-b22474d13aab]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_name[id-33163b73-79f5-4d07-a7ea-9213bcc468ff]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_id[id-9f238683-c763-45aa-b848-232ec3ce3105]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_ref[id-05a377b8-28cf-4734-a1e6-2ab5c38bf606]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_status[id-a3f5b513-aeb3-42a9-b18e-f091ef73254d]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_type[id-e3356918-4d3e-4756-81d5-abc4524ba29f]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_limit_results[id-3a484ca9-67ba-451e-b494-7fcf28d32d62]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_changes_since[id-7d439e18-ac2e-4827-b049-7e18004712c4]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_name[id-644ea267-9bd9-4f3b-af9f-dffa02396a17]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_server_ref[id-8c78f822-203b-4bf6-8bba-56ebd551cf84]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_status[id-9b0ea018-6185-4f71-948a-a123a107988e]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_type[id-888c0cc0-7223-43c5-9db0-b125fd0a393b]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_limit_results[id-ba2fa9a9-b672-47cc-b354-3b4c0600e2cb]
-tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image[id-490d0898-e12a-463f-aef0-c50156b9f789]
-tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images[id-fd51b7f4-d4a3-4331-9885-866658112a6f]
-tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail[id-9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6]
tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
+tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f]
tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
+tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f]
tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
+tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f]
tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
+tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f]
tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c]
tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a]
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9]
@@ -37,31 +21,28 @@ tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.t
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4]
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f]
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_active_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908]
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba]
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76]
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a]
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip[id-43a1242e-7b31-48d1-88f2-3f72aa9f2077]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip_regex[id-a905e287-c35e-42f2-b132-d02b09f3654a]
tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9]
tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits[id-12c80a9f-2dec-480e-882b-98ba15757659]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4,negative]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f]
tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32,smoke]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32]
tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm[id-1499262a-9328-4eda-9068-db1ac57498d2]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert[id-c03aab19-adb1-44f5-917d-c419577e9e68]
tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560]
tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658]
tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a]
@@ -74,49 +55,194 @@ tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_key
tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699]
tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077]
tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_non_existent_flavor[id-ced1a1d7-2ab6-45c9-b90f-b27d87b30efd,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_null_flavor[id-45436a7d-a388-4a35-a9d8-3adc5d0d940b,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_delete_a_server_of_another_tenant[id-5c75009d-3eea-423e-bea3-61b09fd25f9c,negative]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_update_server_of_another_tenant[id-543d84c1-dd2e-4c6d-8cb2-b9da0efaa384,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_reboot_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899]
tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a]
tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107]
+tempest.api.compute.test_versions.TestVersions.test_list_api_versions[id-6c0a0990-43b6-4529-9b61-5fd8daf7c55c]
tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff]
tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513]
-tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list[id-bc2dd1a0-15af-48e5-9990-f2e75a48325d]
-tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_with_details[id-bad0567a-5a4f-420b-851e-780b55bb867c]
-tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_invalid_volume_id[id-f01904f2-e975-4915-98ce-cb5fa27bde4f,negative]
-tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id[id-62bab09a-4c03-4617-8cca-8572bc94af9b,negative]
+tempest.api.identity.v3.TestApiDiscovery.test_api_media_types[id-657c1970-4722-4189-8831-7325f3bc4265]
+tempest.api.identity.v3.TestApiDiscovery.test_api_version_resources[id-b9232f5e-d9e5-4d97-b96c-28d3db4de1bd]
+tempest.api.identity.v3.TestApiDiscovery.test_api_version_statuses[id-8879a470-abfb-47bb-bb8d-5a7fd279ad1e]
+tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_media_types[id-657c1970-4722-4189-8831-7325f3bc4265]
+tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_resources[id-b9232f5e-d9e5-4d97-b96c-28d3db4de1bd]
+tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_statuses[id-8879a470-abfb-47bb-bb8d-5a7fd279ad1e]
tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token[id-6f8e4436-fc96-4282-8122-e41df57197a9]
+tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_delete_image[id-f848bb94-1c6e-45a4-8726-39e3a5b23535]
+tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image[id-f66891a7-a35c-41a8-b590-a065c2a1caa6]
+tempest.api.image.v2.test_images.ListImagesTest.test_get_image_schema[id-622b925c-479f-4736-860d-adeaf13bc371]
+tempest.api.image.v2.test_images.ListImagesTest.test_get_images_schema[id-25c8d7b2-df21-460f-87ac-93130bcdc684]
+tempest.api.image.v2.test_images.ListImagesTest.test_index_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
+tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_container_format[id-9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e]
+tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_disk_format[id-4a4735a7-f22f-49b6-b0d9-66e1ef7453eb]
+tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_limit[id-e914a891-3cc8-4b40-ad32-e0a39ffbddbb]
+tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_min_max_size[id-4ad8c157-971a-4ba8-aa84-ed61154b1e7f]
+tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_size[id-cf1b9a48-8340-480e-af7b-fe7e17690876]
+tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_status[id-7fc9e369-0f58-4d05-9aa5-0969e2d59d15]
+tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_visibility[id-7a95bb92-d99e-4b12-9718-7bc6ab73e6d2]
tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v1.test_images.ListImagesTest.test_index_no_params[id-246178ab-3b33-4212-9a4b-a7fe8261794d]
-tempest.api.object_storage.test_object_expiry.ObjectExpiryTest.test_get_object_after_expiry_time[id-fb024a42-37f3-4ba5-9684-4f40a7910b41]
-tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way[id-06f90388-2d0e-40aa-934c-e9a8833e958a]
-tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers[id-aa467252-44f3-472a-b5ae-5b57c3c9c147]
-tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container[id-1a9ab572-1b66-4981-8c21-416e2a5e6011]
-tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself[id-2248abba-415d-410b-9c30-22dff9cd6e67]
-tempest.api.object_storage.test_object_services.ObjectTest.test_create_object[id-5b4ce26f-3545-46c9-a2ba-5754358a4c62,smoke]
-tempest.api.object_storage.test_object_services.ObjectTest.test_delete_object[id-17738d45-03bd-4d45-9e0b-7b2f58f98687]
-tempest.api.object_storage.test_object_services.ObjectTest.test_get_object[id-02610ba7-86b7-4272-9ed8-aa8d417cb3cd,smoke]
-tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_if_different[id-50d01f12-526f-4360-9ac2-75dd508d7b68]
-tempest.api.object_storage.test_object_services.ObjectTest.test_object_upload_in_segments[id-e3e6a64a-9f50-4955-b987-6ce6767c97fb]
-tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_get_object_using_temp_url[id-f91c96d4-1230-4bba-8eb9-84476d18d991]
-tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_put_object_using_temp_url[id-9b08dade-3571-4152-8a4f-a4f2a873a735]
-tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container[id-a151e158-dcbf-4a1f-a1e7-46cd65895a6f]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_get_image_schema[id-622b925c-479f-4736-860d-adeaf13bc371]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_get_images_schema[id-25c8d7b2-df21-460f-87ac-93130bcdc684]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_container_format[id-9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_disk_format[id-4a4735a7-f22f-49b6-b0d9-66e1ef7453eb]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_limit[id-e914a891-3cc8-4b40-ad32-e0a39ffbddbb]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_min_max_size[id-4ad8c157-971a-4ba8-aa84-ed61154b1e7f]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_size[id-cf1b9a48-8340-480e-af7b-fe7e17690876]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_status[id-7fc9e369-0f58-4d05-9aa5-0969e2d59d15]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_visibility[id-7a95bb92-d99e-4b12-9718-7bc6ab73e6d2]
+tempest.api.image.v2.test_images.ListUserImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
+tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_image_null_id[id-32248db1-ab88-4821-9604-c7c369f1f88c]
+tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_non_existing_image[id-6fe40f1c-57bd-4918-89cc-8500f850f3de]
+tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_delete_deleted_image[id-e57fc127-7ba0-4693-92d7-1d8a05ebcba9]
+tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_image_null_id[id-ef45000d-0a72-4781-866d-4cb7bf2562ad]
+tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_non_existent_image[id-668743d5-08ad-4480-b2b8-15da34f81d9f]
+tempest.api.image.v2.test_images_tags.ImagesTagsTest.test_update_delete_tags_for_image[id-10407036-6059-4f95-a2cd-cbbbee7ed329]
+tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_delete_non_existing_tag[id-39c023a2-325a-433a-9eea-649bf1414b19]
+tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_update_tags_for_non_existing_image[id-8cd30f82-6f9a-4c6e-8034-c1b51fba43d9]
+tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
+tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
+tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
+tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
+tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
+tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
+tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
+tempest.api.network.test_networks.NetworksTest.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221]
+tempest.api.network.test_networks.NetworksTest.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
+tempest.api.network.test_networks.NetworksTest.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43]
+tempest.api.network.test_networks.NetworksTest.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
+tempest.api.network.test_networks.NetworksTest.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a]
+tempest.api.network.test_networks.NetworksTest.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
+tempest.api.network.test_networks.NetworksTest.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e]
+tempest.api.network.test_networks.NetworksTest.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
+tempest.api.network.test_networks.NetworksTest.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc]
+tempest.api.network.test_networks.NetworksTest.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
+tempest.api.network.test_networks.NetworksTest.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
+tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
+tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
+tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
+tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
+tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
+tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
+tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
+tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221]
+tempest.api.network.test_networks.NetworksTestJSON.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
+tempest.api.network.test_networks.NetworksTestJSON.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43]
+tempest.api.network.test_networks.NetworksTestJSON.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
+tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a]
+tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
+tempest.api.network.test_networks.NetworksTestJSON.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e]
+tempest.api.network.test_networks.NetworksTestJSON.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
+tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc]
+tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
+tempest.api.network.test_networks.NetworksTestJSON.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
+tempest.api.network.test_ports.PortsTestJSON.test_create_bulk_port[id-67f1b811-f8db-43e2-86bd-72c074d4a42c]
+tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1]
+tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c]
+tempest.api.network.test_ports.PortsTestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e]
+tempest.api.network.test_ports.PortsTestJSON.test_list_ports_fields[id-ff7f117f-f034-4e0e-abff-ccef05c454b4]
+tempest.api.network.test_ports.PortsTestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f]
+tempest.api.network.test_ports.PortsTestJSON.test_show_port_fields[id-45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd]
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes[id-58091b66-4ff4-4cc1-a549-05d60c7acd1a]
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes[id-edf6766d-3d40-4621-bc6e-2521a44c257d]
+tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802]
+tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_additional_args[id-87dfbcf9-1849-43ea-b1e4-efa3eeae9f71]
+tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code[id-c9463db8-b44d-4f52-b6c0-8dbda99f26ce]
+tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_protocol_integer_value[id-0a307599-6655-4220-bebc-fd70c64f2290]
+tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_group_id[id-c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b]
+tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_ip_prefix[id-16459776-5da2-4634-bce4-4b55ee3ec188]
+tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9]
+tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_additional_default_security_group_fails[id-2323061e-9fbf-4eb0-b547-7e8fafc90849]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_duplicate_security_group_rule_fails[id-8fde898f-ce88-493b-adc9-4e4692879fc5]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_ethertype[id-5666968c-fff3-40d6-9efc-df1c8bd01abb]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_protocol[id-981bdc22-ce48-41ed-900a-73148b583958]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_remote_ip_prefix[id-5f8daf69-3c5f-4aaa-88c9-db1d66f68679]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_invalid_ports[id-0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_remote_groupid[id-4bf786fd-2f02-443c-9716-5b98e159a49a]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_security_group[id-be308db6-a7cf-4d5c-9baf-71bafd73f35e]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_delete_non_existent_security_group[id-1f1bb89d-5664-4956-9fcd-83ee0fa603df]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group[id-424fd5c3-9ddc-486a-b45f-39bf0c820fc6]
+tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group_rule[id-4c094c09-000b-4e41-8100-9617600c02a6]
+tempest.api.volume.test_availability_zone.AvailabilityZoneV2TestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
+tempest.api.volume.test_extensions.ExtensionsV2TestJSON.test_list_extensions[id-94607eb0-43a5-47ca-82aa-736b41bd2e2c]
+tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_create_get_delete_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
+tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_crud_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
+tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_update_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
+tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_create_get_delete_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
+tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_crud_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
+tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_update_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
+tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance[id-fff42874-7db5-4487-a8e1-ddda5fb5288d]
+tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_get_volume_attachment[id-9516a2c8-9135-488c-8dd6-5677a7e5f371]
+tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_reserve_unreserve_volume[id-92c4ef64-51b2-40c0-9f7e-4749fbaaba33]
+tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_bootable[id-63e21b4c-0a0c-41f6-bfc3-7c2816815599]
+tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_readonly_update[id-fff74e1e-5bd3-4b33-9ea9-24c103bc3f59]
+tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51]
+tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_as_clone[id-3f591b4a-7dc6-444c-bd51-77469506b3a1]
+tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_by_name[id-a28e8da4-0b56-472f-87a8-0f4d3f819c02]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_by_name[id-2de3a6d4-12aa-403b-a8f2-fdeb42a89623]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_param_display_name_and_status[id-777c87c1-2fc4-4883-8b8e-5c0b951d1ec8]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_display_name_and_status[id-856ab8ca-6009-4c37-b691-be1065528ad4]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_metadata[id-1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_details[id-adcbb5a7-5ad8-4b61-bd10-5380e111a877]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_param_metadata[id-b5ebea1b-0603-40a0-bb41-15fcd0a53214]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_availability_zone[id-c0cfa863-3020-40d7-b587-e35f597d5d87]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_status[id-39654e13-734c-4dab-95ce-7613bf8407ce]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_availability_zone[id-e1b80d13-94f0-4ba2-a40e-386af29f8db1]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_status[id-2943f712-71ec-482a-bf49-d5ca06216b9f]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_attach_volumes_with_nonexistent_volume_id[id-f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_invalid_size[id-1ed83a8a-682d-4dfb-a30e-ee63ffd6c049]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_snapshot_id[id-0c36f6ae-4604-4017-b0a9-34fdc63096f9]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_source_volid[id-47c73e08-4be8-45bb-bfdf-0c4e79b88344]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_volume_type[id-10254ed8-3849-454e-862e-3ab8e6aa01d2]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_out_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_negative[id-8b472729-9eba-446e-a83b-916bdb34bef7]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_zero[id-41331caa-eaf4-4001-869d-bc18c1869360]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_without_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_invalid_volume_id[id-1f035827-7c32-4019-9240-b4ec2dbd9dfd]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_volume_without_passing_volume_id[id-441a1550-5d44-4b30-af0f-a6d402f52026]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_detach_volumes_with_invalid_volume_id[id-9f9c24e4-011d-46b5-b992-952140ce237a]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_invalid_volume_id[id-30799cfd-7ee4-446c-b66c-45b383ed211b]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_volume_without_passing_volume_id[id-c6c3db06-29ad-4e91-beb0-2ab195fe49e3]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_invalid_status[id-ba94b27b-be3f-496c-a00e-0283b373fa75]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_nonexistent_name[id-9ca17820-a0e7-4cbd-a7fa-f4468735e359]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_invalid_status[id-143b279b-7522-466b-81be-34a87d564a7c]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_nonexistent_name[id-0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_negative_volume_status[id-449c4ed2-ecdd-47bb-98dc-072aeccf158c]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_nonexistent_volume_id[id-ac6084c0-0546-45f9-b284-38a367e0e0e2]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_unreserve_volume_with_nonexistent_volume_id[id-eb467654-3dc1-4a72-9b46-47c29d22654c]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_empty_volume_id[id-72aeca85-57a5-4c1f-9057-f320f9ea575b]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_invalid_volume_id[id-e66e40d6-65e6-4e75-bdc7-636792fa152d]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_nonexistent_volume_id[id-0186422c-999a-480e-a026-6a665744c30c]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_delete_nonexistent_volume_id[id-555efa6e-efcd-44ef-8a3b-4a7ca4837a29]
+tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_get_nonexistent_volume_id[id-f131c586-9448-44a4-a8b0-54ca838aa43e]
+tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_create_get_list_update_delete[id-2a8abbe4-d871-46db-b049-c41f5af8216e]
+tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
+tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
+tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot[id-677863d1-3142-456d-b6ac-9924f667a7f4]
+tempest.api.volume.test_volumes_snapshots_list.VolumesV2SnapshotListTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
+tempest.api.volume.test_volumes_snapshots_list.VolumesV2SnapshotListTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
+tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id[id-e3e466af-70ab-4f4b-a967-ab04e3532ea7]
+tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id[id-bb9da53e-d335-4309-9c15-7e76fd5e4d6d]
+tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_pagination[id-e9138a2c-f67b-4796-8efa-635c196d01de]
+tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_with_multiple_params[id-2a7064eb-b9c3-429b-b888-33928fc5edd3]
+tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_pagination[id-af55e775-8e4b-4feb-8719-215c43b0238c]
\ No newline at end of file
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
new file mode 100644
index 00000000..ac4e3728
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
@@ -0,0 +1,4 @@
+# This is an empty file to be filled up with the desired tempest test cases
+# Examples:
+#tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
+#tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
\ No newline at end of file
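As a rough illustration of how such a custom list is typically consumed (not part of this patch), the sketch below reads test_list.txt, skips comment and blank lines, and returns the remaining test IDs; the file path used here is only a placeholder for a local checkout.

# Minimal sketch: load a custom tempest test list, ignoring comments and
# blank lines, and return the test IDs.
import os


def read_custom_test_list(path):
    tests = []
    with open(path) as list_file:
        for line in list_file:
            line = line.strip()
            if line and not line.startswith('#'):
                tests.append(line)
    return tests


if __name__ == '__main__':
    # Placeholder path; adjust to the local checkout.
    list_path = os.path.join('functest', 'opnfv_tests', 'openstack',
                             'tempest', 'custom_tests', 'test_list.txt')
    print(read_custom_test_list(list_path))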
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index f925336d..4c96500d 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -324,8 +324,17 @@ class TempestMultisite(TempestCommon):
class TempestCustom(TempestCommon):
- def __init__(self, mode, option):
+ def __init__(self):
TempestCommon.__init__(self)
self.case_name = "tempest_custom"
- self.MODE = mode
- self.OPTION = option
+ self.MODE = "custom"
+ self.OPTION = "--concurrency 1"
+
+
+class TempestDefcore(TempestCommon):
+
+ def __init__(self):
+ TempestCommon.__init__(self)
+ self.case_name = "tempest_defcore"
+ self.MODE = "defcore"
+ self.OPTION = "--concurrency 1"
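The constructor change above removes the mode/option arguments: each subclass now hard-codes its own MODE and OPTION, and the MODE values presumably select the custom and DefCore test lists added elsewhere in this change. A simplified, standalone sketch of that pattern follows; the stub base class stands in for TempestCommon and is not the real implementation.

# Simplified sketch of the constructor pattern introduced above; the real
# classes derive from TempestCommon, which is stubbed out here.
class TempestCommonStub(object):
    def __init__(self):
        self.case_name = None
        self.MODE = None
        self.OPTION = None


class TempestCustom(TempestCommonStub):
    def __init__(self):
        TempestCommonStub.__init__(self)
        self.case_name = "tempest_custom"
        self.MODE = "custom"            # presumably selects test_list.txt
        self.OPTION = "--concurrency 1"


class TempestDefcore(TempestCommonStub):
    def __init__(self):
        TempestCommonStub.__init__(self)
        self.case_name = "tempest_defcore"
        self.MODE = "defcore"           # presumably selects defcore_req.txt
        self.OPTION = "--concurrency 1"


if __name__ == '__main__':
    for case in (TempestCustom(), TempestDefcore()):
        print(case.case_name, case.MODE, case.OPTION)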
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index a5309bd4..9d57cfae 100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
@@ -32,6 +32,8 @@ class VPingBase(testcase_base.TestcaseBase):
self.image_name = CONST.vping_image_name
self.image_filename = CONST.openstack_image_file_name
self.image_format = CONST.openstack_image_disk_format
+ self.image_username = CONST.openstack_image_username
+ self.image_password = CONST.openstack_image_password
self.image_path = os.path.join(CONST.dir_functest_data,
self.image_filename)
diff --git a/functest/opnfv_tests/openstack/vping/vping_ssh.py b/functest/opnfv_tests/openstack/vping/vping_ssh.py
index b032c308..7a58a41f 100755
--- a/functest/opnfv_tests/openstack/vping/vping_ssh.py
+++ b/functest/opnfv_tests/openstack/vping/vping_ssh.py
@@ -61,8 +61,6 @@ class VPingSSH(vping_base.VPingBase):
def establish_ssh(self, vm, floatip):
self.logger.info("Trying to establish SSH connection to %s..."
% floatip)
- username = 'cirros'
- password = 'cubswin:)'
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
@@ -73,8 +71,8 @@ class VPingSSH(vping_base.VPingBase):
cidr_first_octet = self.private_subnet_cidr.split('.')[0]
while timeout > 0:
try:
- ssh.connect(floatip, username=username,
- password=password, timeout=2)
+ ssh.connect(floatip, username=self.image_username,
+ password=self.image_password, timeout=2)
self.logger.debug("SSH connection established to %s."
% floatip)
break
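The hard-coded cirros credentials are replaced by the image_username and image_password attributes that VPingBase now reads from CONST. A self-contained sketch of the same retry pattern, with host and credentials passed in as plain parameters; paramiko is assumed to be installed and the values in the example are placeholders.

# Sketch of the SSH retry loop used above, with credentials passed in rather
# than hard-coded; requires paramiko.
import time

import paramiko


def wait_for_ssh(host, username, password, retries=30, delay=2):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    for _ in range(retries):
        try:
            ssh.connect(host, username=username, password=password, timeout=2)
            return ssh                       # connection established
        except (paramiko.SSHException, IOError):
            time.sleep(delay)                # VM not reachable yet, retry
    return None


if __name__ == '__main__':
    # Placeholder values; in the test they come from CONST.openstack_image_*.
    client = wait_for_ssh('192.0.2.10', 'cirros', 'cubswin:)')
    print('SSH ready' if client else 'SSH never came up')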
diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py
index 9bff324f..69818f5a 100755
--- a/functest/opnfv_tests/sdn/odl/odl.py
+++ b/functest/opnfv_tests/sdn/odl/odl.py
@@ -186,7 +186,7 @@ class ODLTests(testcase_base.TestcaseBase):
return self.main(suites, **kwargs)
-class ODLParser():
+class ODLParser(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
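Several of the changes in this patch (ODLParser, SfcOnos, Foundation, Clearwater, Orchestrator) simply make classes inherit from object. Under Python 2 that turns old-style classes into new-style ones, which is what enables super() calls and consistent type() behaviour; the minimal illustration below assumes Python 2 semantics.

# Minimal illustration of why the classes now derive from object (Python 2):
# old-style classes do not support super() and report a generic type().
class OldStyle:
    pass


class NewStyle(object):
    pass


class Derived(NewStyle):
    def __init__(self):
        # super() only works with new-style classes under Python 2.
        super(Derived, self).__init__()


if __name__ == '__main__':
    print(type(OldStyle()))   # <type 'instance'> under Python 2
    print(type(NewStyle()))   # <class '__main__.NewStyle'>
    print(type(Derived()))    # <class '__main__.Derived'>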
diff --git a/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py b/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
index 090502ba..c2198690 100644
--- a/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
+++ b/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
@@ -10,13 +10,15 @@ from pexpect import pxssh
import functest.utils.functest_logger as ft_logger
+from functest.utils.constants import CONST
+
OK = 200
CREATED = 201
ACCEPTED = 202
NO_CONTENT = 204
-class SfcOnos:
+class SfcOnos(object):
"""Defines all the def function of SFC."""
def __init__(self):
@@ -99,6 +101,8 @@ class SfcOnos:
self.ip_pool = 0
self.vm_public_ip = []
self.vm_public_id = []
+ self.cirros_username = CONST.openstack_image_username
+ self.cirros_password = CONST.openstack_image_password
self.net_id1 = 0
self.vm = []
self.address = 0
@@ -628,9 +632,7 @@ class SfcOnos:
s = pxssh.pxssh()
hostname = self.vm_public_ip[0]
- username = "cirros"
- password = "cubswin:)"
- s.login(hostname, username, password)
+ s.login(hostname, self.cirros_username, self.cirros_password)
s.sendline("ping -c 5 " + str(self.port_ip[2]))
s.prompt() # match the prompt
@@ -644,9 +646,7 @@ class SfcOnos:
def vm1(queue1):
s = pxssh.pxssh()
hostname = self.vm_public_ip[1]
- username = "cirros"
- password = "cubswin:)"
- s.login(hostname, username, password)
+ s.login(hostname, self.cirros_username, self.cirros_password)
s.sendline('sudo ./firewall')
s.prompt()
output_pack = s.before
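The same credential clean-up is applied to the pxssh logins: username and password now come from CONST.openstack_image_username and CONST.openstack_image_password. A standalone sketch of that login pattern follows; pexpect is assumed to be installed, and the host, credentials and target IP are placeholders.

# Sketch of the pxssh login pattern used above, with credentials passed in
# instead of hard-coded cirros defaults; requires pexpect.
from pexpect import pxssh


def run_remote_ping(host, username, password, target_ip):
    session = pxssh.pxssh()
    session.login(host, username, password)
    session.sendline("ping -c 5 " + target_ip)
    session.prompt()                  # wait for the shell prompt to return
    output = session.before           # everything printed before the prompt
    session.logout()
    return output


if __name__ == '__main__':
    # Placeholder values; in the test they come from the image configuration.
    print(run_remote_ping('192.0.2.20', 'cirros', 'cubswin:)', '10.0.0.3'))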
diff --git a/functest/opnfv_tests/sdn/onos/teston/adapters/foundation.py b/functest/opnfv_tests/sdn/onos/teston/adapters/foundation.py
index bf2c4302..2bef5cc6 100644
--- a/functest/opnfv_tests/sdn/onos/teston/adapters/foundation.py
+++ b/functest/opnfv_tests/sdn/onos/teston/adapters/foundation.py
@@ -21,7 +21,7 @@ import functest.utils.functest_constants as ft_constants
import functest.utils.functest_utils as ft_utils
-class Foundation:
+class Foundation(object):
def __init__(self):
diff --git a/functest/opnfv_tests/vnf/ims/clearwater.py b/functest/opnfv_tests/vnf/ims/clearwater.py
index eb0abacd..32c6dc5c 100644
--- a/functest/opnfv_tests/vnf/ims/clearwater.py
+++ b/functest/opnfv_tests/vnf/ims/clearwater.py
@@ -12,7 +12,7 @@
########################################################################
-class Clearwater:
+class Clearwater(object):
def __init__(self, inputs={}, orchestrator=None, logger=None):
self.config = inputs
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
index e354563e..c2c251ad 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
@@ -130,8 +130,8 @@ class ImsVnf(vnf_base.VnfOnBoardingBase):
flavor_exist, flavor_id = os_utils.get_or_create_flavor(
"m1.large",
self.orchestrator['requirements']['ram_min'],
- '1',
- '1',
+ '50',
+ '2',
public=True)
self.logger.debug("Flavor id: %s" % flavor_id)
@@ -187,18 +187,23 @@ class ImsVnf(vnf_base.VnfOnBoardingBase):
self.orchestrator['blueprint']['url'],
self.orchestrator['blueprint']['branch'])
- cfy.deploy_manager()
- return {'status': 'PASS', 'result': ''}
+ error = cfy.deploy_manager()
+ if error:
+ self.logger.error(error)
+ return {'status': 'FAIL', 'result': error}
+ else:
+ return {'status': 'PASS', 'result': ''}
def deploy_vnf(self):
- cw = Clearwater(self.vnf.inputs, self.orchestrator.object, self.logger)
- self.vnf.object = cw
+ cw = Clearwater(self.vnf['inputs'], self.orchestrator['object'],
+ self.logger)
+ self.vnf['object'] = cw
self.logger.info("Collect flavor id for all clearwater vm")
flavor_exist, flavor_id = os_utils.get_or_create_flavor(
"m1.small",
self.vnf['requirements']['ram_min'],
- '1',
+ '20',
'1',
public=True)
self.logger.debug("Flavor id: %s" % flavor_id)
@@ -211,7 +216,7 @@ class ImsVnf(vnf_base.VnfOnBoardingBase):
cw.set_flavor_id(flavor_id)
# VMs image
- if 'os_image' in self.vnf.requirements.keys():
+ if 'os_image' in self.vnf['requirements'].keys():
image_id = os_utils.get_image_id(
self.glance_client, self.vnf['requirements']['os_image'])
if image_id == '':
@@ -229,8 +234,12 @@ class ImsVnf(vnf_base.VnfOnBoardingBase):
cw.set_external_network_name(ext_net)
- cw.deploy_vnf()
- return {'status': 'PASS', 'result': ''}
+ error = cw.deploy_vnf()
+ if error:
+ self.logger.error(error)
+ return {'status': 'FAIL', 'result': error}
+ else:
+ return {'status': 'PASS', 'result': ''}
def test_vnf(self):
script = "source {0}venv_cloudify/bin/activate; "
@@ -248,7 +257,7 @@ class ImsVnf(vnf_base.VnfOnBoardingBase):
api_url = "http://" + mgr_ip + "/api/v2"
dep_outputs = requests.get(api_url + "/deployments/" +
- self.vnf.deployment_name + "/outputs")
+ self.vnf['deployment_name'] + "/outputs")
dns_ip = dep_outputs.json()['outputs']['dns_ip']
ellis_ip = dep_outputs.json()['outputs']['ellis_ip']
@@ -332,8 +341,8 @@ class ImsVnf(vnf_base.VnfOnBoardingBase):
return {'status': 'FAIL', 'result': ''}
def clean(self):
- self.vnf.object.undeploy_vnf()
- self.orchestrator.object.undeploy_manager()
+ self.vnf['object'].undeploy_vnf()
+ self.orchestrator['object'].undeploy_manager()
super(ImsVnf, self).clean()
def main(self, **kwargs):
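deploy_orchestrator() and deploy_vnf() now inspect the return value of the underlying deploy calls and report FAIL instead of unconditionally returning PASS. A stripped-down sketch of that status-propagation pattern is below; the deploy callable is a stand-in for cfy.deploy_manager() or cw.deploy_vnf(), which are expected to return an error message on failure and nothing on success.

# Stripped-down sketch of the PASS/FAIL propagation added above.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("cloudify_ims_sketch")


def run_step(deploy_callable):
    error = deploy_callable()
    if error:
        logger.error(error)
        return {'status': 'FAIL', 'result': error}
    return {'status': 'PASS', 'result': ''}


if __name__ == '__main__':
    print(run_step(lambda: None))                        # -> PASS
    print(run_step(lambda: "manager bootstrap failed"))  # -> FAIL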
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
index c5918087..775685fa 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
@@ -6,7 +6,7 @@ cloudify:
url: https://github.com/boucherv-orange/cloudify-manager-blueprints.git
branch: '3.3.1-build'
requirements:
- ram_min: 3000
+ ram_min: 4000
os_image: centos_7
inputs:
keystone_username: ""
@@ -29,7 +29,7 @@ clearwater:
branch: stable
deployment_name: clearwater-opnfv
requirements:
- ram_min: 1700
+ ram_min: 2000
os_image: ubuntu_14.04
inputs:
image_id: ''
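The ram_min bumps above feed the flavor creation shown earlier in cloudify_ims.py, where get_or_create_flavor is called with a name, RAM, disk, vCPUs and public=True. The small sketch below only reads those requirements back out of the YAML; PyYAML is assumed and the path is a placeholder for a local checkout.

# Sketch: read the ram_min requirements that the flavor creation relies on.
import yaml


def load_ram_requirements(path):
    with open(path) as config_file:
        config = yaml.safe_load(config_file)
    return {
        'orchestrator_ram_min': config['cloudify']['requirements']['ram_min'],
        'vnf_ram_min': config['clearwater']['requirements']['ram_min'],
    }


if __name__ == '__main__':
    reqs = load_ram_requirements(
        'functest/opnfv_tests/vnf/ims/cloudify_ims.yaml')
    # With this patch the values are expected to be 4000 and 2000 MB.
    print(reqs)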
diff --git a/functest/opnfv_tests/vnf/ims/opera_ims.py b/functest/opnfv_tests/vnf/ims/opera_ims.py
index 073a56c3..7ead401f 100644
--- a/functest/opnfv_tests/vnf/ims/opera_ims.py
+++ b/functest/opnfv_tests/vnf/ims/opera_ims.py
@@ -8,148 +8,393 @@
# http://www.apache.org/licenses/LICENSE-2.0
import json
-import os
-import requests
-import subprocess
+import socket
+import sys
import time
+import yaml
import functest.core.vnf_base as vnf_base
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+import os
from functest.utils.constants import CONST
+from org.openbaton.cli.agents.agents import MainAgent
+from org.openbaton.cli.errors.errors import NfvoException
+
+
+def servertest(host, port):
+ args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
+ for family, socktype, proto, canonname, sockaddr in args:
+ s = socket.socket(family, socktype, proto)
+ try:
+ s.connect(sockaddr)
+ except socket.error:
+ return False
+ else:
+ s.close()
+ return True
+
+# ----------------------------------------------------------
+#
+# UTILS
+#
+# -----------------------------------------------------------
+
+
+def get_config(parameter, file):
+ """
+ Returns the value of a given parameter in file.yaml
+ parameter must be given in string format with dots
+ Example: general.openstack.image_name
+ """
+ with open(file) as f:
+ file_yaml = yaml.safe_load(f)
+ f.close()
+ value = file_yaml
+ for element in parameter.split("."):
+ value = value.get(element)
+ if value is None:
+ raise ValueError("The parameter %s is not defined in"
+ " reporting.yaml" % parameter)
+ return value
+
+
+def download_and_add_image_on_glance(glance, image_name,
+ image_url, data_dir):
+ dest_path = data_dir
+ if not os.path.exists(dest_path):
+ os.makedirs(dest_path)
+ file_name = image_url.rsplit('/')[-1]
+ if not ft_utils.download_url(image_url, dest_path):
+ return False
+ image = os_utils.create_glance_image(
+ glance, image_name, dest_path + file_name)
+ if not image:
+ return False
+ return image
+
class ImsVnf(vnf_base.VnfOnBoardingBase):
- def __init__(self, project='functest', case='opera_ims',
+ def __init__(self, project='functest', case='orchestra_ims',
repo='', cmd=''):
super(ImsVnf, self).__init__(project, case, repo, cmd)
- self.logger = ft_logger.Logger("vIMS").getLogger()
- self.case_dir = os.path.join(CONST.functest_test, 'vnf/ims/')
- self.data_dir = CONST.dir_vIMS_data
+ self.ob_password = "openbaton"
+ self.ob_username = "admin"
+ self.ob_https = False
+ self.ob_port = "8080"
+ self.ob_ip = "localhost"
+ self.ob_instance_id = ""
+ self.logger = ft_logger.Logger("orchestra_ims").getLogger()
+ self.case_dir = os.path.join(CONST.dir_functest_test, 'vnf/ims/')
+ self.data_dir = CONST.dir_ims_data
self.test_dir = CONST.dir_repo_vims_test
-
+ self.ob_projectid = ""
+ self.keystone_client = os_utils.get_keystone_client()
+ self.ob_nsr_id = ""
+ self.main_agent = None
# vIMS Data directory creation
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
+ # Retrieve the configuration
+ try:
+ self.config = CONST.__getattribute__(
+ 'vnf_{}_config'.format(self.case_name))
+ except:
+ raise Exception("Orchestra VNF config file not found")
+ config_file = self.case_dir + self.config
+ self.imagename = get_config("openbaton.imagename", config_file)
+ self.market_link = get_config("openbaton.marketplace_link",
+ config_file)
+ self.images = get_config("tenant_images", config_file)
def deploy_orchestrator(self, **kwargs):
- # TODO
- # deploy open-O from Functest docker located on the Jumphost
- # you have admin rights on OpenStack SUT
- # you can cretae a VM, spawn docker on the jumphost
- # spawn docker on a VM in the SUT, ..up to you
- #
- # note: this step can be ignored
- # if Open-O is part of the installer
+ self.logger.info("Additional pre-configuration steps")
+ nova_client = os_utils.get_nova_client()
+ neutron_client = os_utils.get_neutron_client()
+ glance_client = os_utils.get_glance_client()
+
+ # needs some images
+ self.logger.info("Upload some OS images if it doesn't exist")
+ temp_dir = os.path.join(self.data_dir, "tmp/")
+ for image_name, image_url in self.images.iteritems():
+ self.logger.info("image: %s, url: %s" % (image_name, image_url))
+ try:
+ image_id = os_utils.get_image_id(glance_client,
+ image_name)
+ self.logger.info("image_id: %s" % image_id)
+ except:
+ self.logger.error("Unexpected error: %s" % sys.exc_info()[0])
+
+ if image_id == '':
+ self.logger.info("""%s image doesn't exist on glance repository. Try
+ downloading this image and upload on glance !""" % image_name)
+ image_id = download_and_add_image_on_glance(glance_client,
+ image_name,
+ image_url,
+ temp_dir)
+ if image_id == '':
+ self.step_failure(
+ "Failed to find or upload required OS "
+ "image for this deployment")
+ network_dic = os_utils.create_network_full(neutron_client,
+ "openbaton_mgmt",
+ "openbaton_mgmt_subnet",
+ "openbaton_router",
+ "192.168.100.0/24")
+
+ # orchestrator VM flavor
+ self.logger.info("Check medium Flavor is available, if not create one")
+ flavor_exist, flavor_id = os_utils.get_or_create_flavor(
+ "m1.medium",
+ "4096",
+ '1',
+ '2',
+ public=True)
+ self.logger.debug("Flavor id: %s" % flavor_id)
+
+ if not network_dic:
+ self.logger.error("There has been a problem when creating the "
+ "neutron network")
+
+ network_id = network_dic["net_id"]
+
+ self.logger.info("Creating floating IP for VM in advance...")
+ floatip_dic = os_utils.create_floating_ip(neutron_client)
+ floatip = floatip_dic['fip_addr']
+
+ if floatip is None:
+ self.logger.error("Cannot create floating IP.")
+
+ userdata = "#!/bin/bash\n"
+ userdata += "set -x\n"
+ userdata += "set -e\n"
+ userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
+ userdata += "apt-get install curl\n"
+ userdata += ("echo \"rabbitmq_broker_ip=%s\" > ./config_file\n"
+ % floatip)
+ userdata += "echo \"mysql=no\" >> ./config_file\n"
+ userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3"
+ "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc"
+ "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv"
+ "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S"
+ "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O"
+ "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc"
+ "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya"
+ "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut"
+ "horized_keys\n")
+ userdata += "cat ./config_file\n"
+ userdata += ("curl -s http://get.openbaton.org/bootstrap "
+ "> ./bootstrap\n")
+ userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
+ bootstrap = "sh ./bootstrap release -configFile=./config_file"
+ userdata += bootstrap + "\n"
+
+ userdata += ("echo \"nfvo.plugin.timeout=300000\" >> "
+ "/etc/openbaton/openbaton-nfvo.properties\n")
+ userdata += "service openbaton-nfvo restart\n"
+ userdata += "service openbaton-vnfm-generic restart\n"
+
+ sg_id = os_utils.create_security_group_full(neutron_client,
+ "orchestra-sec-group",
+ "allowall")
+
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
+ "icmp", 0, 255)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
+ "icmp", 0, 255)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
+ "tcp", 1, 65535)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
+ "udp", 1, 65535)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
+ "tcp", 1, 65535)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
+ "udp", 1, 65535)
+
+ self.logger.info("Security group set")
+
+ self.logger.info("Create instance....")
+ self.logger.info("flavor: m1.medium\n"
+ "image: %s\n"
+ "network_id: %s\n"
+ "userdata: %s\n"
+ % (self.imagename, network_id, userdata))
+
+ instance = os_utils.create_instance_and_wait_for_active(
+ "m1.medium",
+ os_utils.get_image_id(glance_client, self.imagename),
+ network_id,
+ "orchestra-openbaton",
+ config_drive=False,
+ userdata=userdata)
+
+ self.ob_instance_id = instance.id
+
+ self.logger.info("Adding sec group to orchestra instance")
+ os_utils.add_secgroup_to_instance(nova_client,
+ self.ob_instance_id, sg_id)
+
+ self.logger.info("Associating floating ip: '%s' to VM '%s' "
+ % (floatip, "orchestra-openbaton"))
+ if not os_utils.add_floating_ip(nova_client, instance.id, floatip):
+ self.logger.error("Cannot associate floating IP to VM.")
+ self.step_failure("Cannot associate floating IP to VM.")
+
+ self.logger.info("Waiting for nfvo to be up and running...")
+ x = 0
+ while x < 100:
+ if servertest(floatip, "8080"):
+ break
+ else:
+ self.logger.debug("openbaton is not started yet")
+ time.sleep(5)
+ x += 1
+
+ if x == 100:
+ self.logger.error("Openbaton is not started correctly")
+ self.step_failure("Openbaton is not started correctly")
+
+ self.ob_ip = floatip
+ self.ob_password = "openbaton"
+ self.ob_username = "admin"
+ self.ob_https = False
+ self.ob_port = "8080"
+
self.logger.info("Deploy orchestrator: OK")
def deploy_vnf(self):
- # TODO
- self.logger.info("Deploy VNF: OK")
+ self.logger.info("vIMS Deployment")
+
+ self.main_agent = MainAgent(nfvo_ip=self.ob_ip,
+ nfvo_port=self.ob_port,
+ https=self.ob_https,
+ version=1,
+ username=self.ob_username,
+ password=self.ob_password)
+
+ project_agent = self.main_agent.get_agent("project", self.ob_projectid)
+ for p in json.loads(project_agent.find()):
+ if p.get("name") == "default":
+ self.ob_projectid = p.get("id")
+ break
+
+ self.logger.debug("project id: %s" % self.ob_projectid)
+ if self.ob_projectid == "":
+ self.logger.error("Default project id was not found!")
+ self.step_failure("Default project id was not found!")
+
+ vim_json = {
+ "name": "vim-instance",
+ "authUrl": os_utils.get_credentials().get("auth_url"),
+ "tenant": os_utils.get_credentials().get("tenant_name"),
+ "username": os_utils.get_credentials().get("username"),
+ "password": os_utils.get_credentials().get("password"),
+ # "keyPair": "opnfv",
+ # TODO change the keypair to correct value
+ # or upload a correct one or remove it
+ "securityGroups": [
+ "default",
+ "orchestra-sec-group"
+ ],
+ "type": "openstack",
+ "location": {
+ "name": "opnfv",
+ "latitude": "52.525876",
+ "longitude": "13.314400"
+ }
+ }
+
+ self.logger.debug("vim: %s" % vim_json)
+
+ self.main_agent.get_agent(
+ "vim",
+ project_id=self.ob_projectid).create(entity=json.dumps(vim_json))
+
+ market_agent = self.main_agent.get_agent("market",
+ project_id=self.ob_projectid)
+
+ nsd = {}
+ try:
+ self.logger.info("sending: %s" % self.market_link)
+ nsd = market_agent.create(entity=self.market_link)
+ self.logger.info("Onboarded nsd: " + nsd.get("name"))
+ except NfvoException as e:
+ self.step_failure(e.message)
+
+ nsr_agent = self.main_agent.get_agent("nsr",
+ project_id=self.ob_projectid)
+ nsd_id = nsd.get('id')
+ if nsd_id is None:
+ self.step_failure("NSD not onboarded correctly")
+
+ nsr = None
+ try:
+ nsr = nsr_agent.create(nsd_id)
+ except NfvoException as e:
+ self.step_failure(e.message)
+
+ if nsr.get('code') is not None:
+ self.logger.error(
+ "vIMS cannot be deployed: %s -> %s" %
+ (nsr.get('code'), nsr.get('message')))
+ self.step_failure("vIMS cannot be deployed")
+
+ i = 0
+ self.logger.info("waiting NSR to go to active...")
+ while nsr.get("status") != 'ACTIVE' and nsr.get("status") != 'ERROR':
+ i += 1
+ if i == 100:
+ self.step_failure("After %s sec the nsr did not go to active.."
+                                  % (5 * 100))
+ time.sleep(5)
+ nsr = json.loads(nsr_agent.find(nsr.get('id')))
+
+ if nsr.get("status") == 'ACTIVE':
+ deploy_vnf = {'status': "PASS", 'result': nsr}
+ self.logger.info("Deploy VNF: OK")
+ else:
+ deploy_vnf = {'status': "FAIL", 'result': nsr}
+ self.logger.error("Deploy VNF: ERROR")
+ self.step_failure("Deploy vIMS failed")
+ self.ob_nsr_id = nsr.get("id")
+ return deploy_vnf
def test_vnf(self):
# Adaptations probably needed
# code used for cloudify_ims
# ruby client on jumphost calling the vIMS on the SUT
- script = "source {0}venv_cloudify/bin/activate; "
- script += "cd {0}; "
- script += "cfy status | grep -Eo \"([0-9]{{1,3}}\.){{3}}[0-9]{{1,3}}\""
- cmd = "/bin/bash -c '" + script.format(self.data_dir) + "'"
+ return
- try:
- self.logger.debug("Trying to get clearwater manager IP ... ")
- mgr_ip = os.popen(cmd).read()
- mgr_ip = mgr_ip.splitlines()[0]
- except:
- self.step_failure("Unable to retrieve the IP of the "
- "cloudify manager server !")
-
- api_url = "http://" + mgr_ip + "/api/v2"
- dep_outputs = requests.get(api_url + "/deployments/" +
- self.vnf.deployment_name + "/outputs")
- dns_ip = dep_outputs.json()['outputs']['dns_ip']
- ellis_ip = dep_outputs.json()['outputs']['ellis_ip']
-
- ellis_url = "http://" + ellis_ip + "/"
- url = ellis_url + "accounts"
-
- params = {"password": "functest",
- "full_name": "opnfv functest user",
- "email": "functest@opnfv.fr",
- "signup_code": "secret"}
-
- rq = requests.post(url, data=params)
- i = 20
- while rq.status_code != 201 and i > 0:
- rq = requests.post(url, data=params)
- i = i - 1
- time.sleep(10)
-
- if rq.status_code == 201:
- url = ellis_url + "session"
- rq = requests.post(url, data=params)
- cookies = rq.cookies
-
- url = ellis_url + "accounts/" + params['email'] + "/numbers"
- if cookies != "":
- rq = requests.post(url, cookies=cookies)
- i = 24
- while rq.status_code != 200 and i > 0:
- rq = requests.post(url, cookies=cookies)
- i = i - 1
- time.sleep(25)
-
- if rq.status_code != 200:
- self.step_failure("Unable to create a number: %s"
- % rq.json()['reason'])
-
- nameservers = ft_utils.get_resolvconf_ns()
- resolvconf = ""
- for ns in nameservers:
- resolvconf += "\nnameserver " + ns
-
- if dns_ip != "":
- script = ('echo -e "nameserver ' + dns_ip + resolvconf +
- '" > /etc/resolv.conf; ')
- script += 'source /etc/profile.d/rvm.sh; '
- script += 'cd {0}; '
- script += ('rake test[{1}] SIGNUP_CODE="secret"')
-
- cmd = ("/bin/bash -c '" +
- script.format(self.data_dir, self.inputs["public_domain"]) +
- "'")
- output_file = "output.txt"
- f = open(output_file, 'w+')
- subprocess.call(cmd, shell=True, stdout=f,
- stderr=subprocess.STDOUT)
- f.close()
-
- f = open(output_file, 'r')
- result = f.read()
- if result != "":
- self.logger.debug(result)
-
- vims_test_result = ""
- tempFile = os.path.join(self.test_dir, "temp.json")
- try:
- self.logger.debug("Trying to load test results")
- with open(tempFile) as f:
- vims_test_result = json.load(f)
- f.close()
- except:
- self.logger.error("Unable to retrieve test results")
+ def clean(self):
+ self.main_agent.get_agent(
+ "nsr",
+ project_id=self.ob_projectid).delete(self.ob_nsr_id)
+ time.sleep(5)
+ os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
+ instance_id=self.ob_instance_id)
+        # TODO: check whether the base clean() also removes the VM.
+        # It probably does, since it is going to remove the tenant...
+ super(ImsVnf, self).clean()
- try:
- os.remove(tempFile)
- except:
- self.logger.error("Deleting file failed")
+ def main(self, **kwargs):
+ self.logger.info("Orchestra IMS VNF onboarding test starting")
+ self.execute()
+ self.logger.info("Orchestra IMS VNF onboarding test executed")
+        if self.criteria == "PASS":
+ return self.EX_OK
+ else:
+ return self.EX_RUN_ERROR
- if vims_test_result != '':
- return {'status': 'PASS', 'result': vims_test_result}
- else:
- return {'status': 'FAIL', 'result': ''}
+ def run(self):
+ kwargs = {}
+ return self.main(**kwargs)
- def clean(self):
- # TODO
- super(ImsVnf, self).clean()
+
+if __name__ == '__main__':
+ test = ImsVnf()
+ test.deploy_orchestrator()
+ test.deploy_vnf()
+ test.clean()
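Both new IMS test modules rely on the servertest() helper above to detect when the OpenBaton NFVO REST port starts answering. The loop can be exercised on its own; the sketch below reproduces the helper and polls a host/port pair for a bounded number of attempts, mirroring the 100 x 5 s wait in deploy_orchestrator(). The endpoint in the example is a placeholder.

# Standalone version of the servertest() polling used above: try a plain TCP
# connect until the port answers or the attempt budget runs out.
import socket
import time


def servertest(host, port):
    args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
    for family, socktype, proto, _canonname, sockaddr in args:
        s = socket.socket(family, socktype, proto)
        try:
            s.connect(sockaddr)
        except socket.error:
            return False
        else:
            s.close()
            return True


def wait_for_port(host, port, attempts=100, delay=5):
    for _ in range(attempts):
        if servertest(host, port):
            return True
        time.sleep(delay)            # NFVO not started yet, retry
    return False


if __name__ == '__main__':
    # Placeholder endpoint; in the test this is the floating IP and port 8080.
    print(wait_for_port('192.0.2.30', 8080, attempts=3, delay=1))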
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.py b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
index 28f37f05..352b609b 100644
--- a/functest/opnfv_tests/vnf/ims/orchestra_ims.py
+++ b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
@@ -8,150 +8,383 @@
# http://www.apache.org/licenses/LICENSE-2.0
import json
-import os
-import requests
-import subprocess
+import socket
+import sys
import time
+import yaml
import functest.core.vnf_base as vnf_base
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+import os
from functest.utils.constants import CONST
+from org.openbaton.cli.agents.agents import MainAgent
+from org.openbaton.cli.errors.errors import NfvoException
-class ImsVnf(vnf_base.VnfOnBoardingBase):
+def servertest(host, port):
+ args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
+ for family, socktype, proto, canonname, sockaddr in args:
+ s = socket.socket(family, socktype, proto)
+ try:
+ s.connect(sockaddr)
+ except socket.error:
+ return False
+ else:
+ s.close()
+ return True
+
+
+class ImsVnf(vnf_base.VnfOnBoardingBase):
def __init__(self, project='functest', case='orchestra_ims',
repo='', cmd=''):
super(ImsVnf, self).__init__(project, case, repo, cmd)
- self.logger = ft_logger.Logger("vIMS").getLogger()
- self.case_dir = os.path.join(CONST.functest_test, 'vnf/ims/')
- self.data_dir = CONST.dir_vIMS_data
+ self.ob_password = "openbaton"
+ self.ob_username = "admin"
+ self.ob_https = False
+ self.ob_port = "8080"
+ self.ob_ip = "localhost"
+ self.ob_instance_id = ""
+ self.logger = ft_logger.Logger("orchestra_ims").getLogger()
+ self.case_dir = os.path.join(CONST.dir_functest_test, 'vnf/ims/')
+ self.data_dir = CONST.dir_ims_data
self.test_dir = CONST.dir_repo_vims_test
-
+ self.ob_projectid = ""
+ self.keystone_client = os_utils.get_keystone_client()
+ self.ob_nsr_id = ""
+ self.main_agent = None
# vIMS Data directory creation
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
+ # Retrieve the configuration
+ try:
+ self.config = CONST.__getattribute__(
+ 'vnf_{}_config'.format(self.case_name))
+ except:
+ raise Exception("Orchestra VNF config file not found")
+ config_file = self.case_dir + self.config
+ self.imagename = get_config("openbaton.imagename", config_file)
+ self.market_link = get_config("openbaton.marketplace_link",
+ config_file)
+ self.images = get_config("tenant_images", config_file)
def deploy_orchestrator(self, **kwargs):
- # TODO
- # put your code here to deploy openbaton
- # from the functest docker located on the jumphost
- # you have admin rights on OpenStack SUT
- # you can cretae a VM, spawn docker on the jumphost
- # spawn docker on a VM in the SUT, ..up to you
- #
- # note: this step can be ignored
- # if OpenBaton is part of the installer
+ self.logger.info("Additional pre-configuration steps")
+ nova_client = os_utils.get_nova_client()
+ neutron_client = os_utils.get_neutron_client()
+ glance_client = os_utils.get_glance_client()
+
+ # Import images if needed
+ self.logger.info("Upload some OS images if it doesn't exist")
+ temp_dir = os.path.join(self.data_dir, "tmp/")
+ for image_name, image_url in self.images.iteritems():
+ self.logger.info("image: %s, url: %s" % (image_name, image_url))
+ try:
+ image_id = os_utils.get_image_id(glance_client,
+ image_name)
+ self.logger.info("image_id: %s" % image_id)
+ except:
+ self.logger.error("Unexpected error: %s" % sys.exc_info()[0])
+
+ if image_id == '':
+ self.logger.info("""%s image doesn't exist on glance repository. Try
+ downloading this image and upload on glance !""" % image_name)
+ image_id = download_and_add_image_on_glance(glance_client,
+ image_name,
+ image_url,
+ temp_dir)
+ if image_id == '':
+ self.step_failure(
+ "Failed to find or upload required OS "
+ "image for this deployment")
+ network_dic = os_utils.create_network_full(neutron_client,
+ "openbaton_mgmt",
+ "openbaton_mgmt_subnet",
+ "openbaton_router",
+ "192.168.100.0/24")
+
+ # orchestrator VM flavor
+ self.logger.info("Check medium Flavor is available, if not create one")
+ flavor_exist, flavor_id = os_utils.get_or_create_flavor(
+ "m1.medium",
+ "4096",
+ '20',
+ '2',
+ public=True)
+ self.logger.debug("Flavor id: %s" % flavor_id)
+
+ if not network_dic:
+ self.logger.error("There has been a problem when creating the "
+ "neutron network")
+
+ network_id = network_dic["net_id"]
+
+ self.logger.info("Creating floating IP for VM in advance...")
+ floatip_dic = os_utils.create_floating_ip(neutron_client)
+ floatip = floatip_dic['fip_addr']
+
+ if floatip is None:
+ self.logger.error("Cannot create floating IP.")
+
+ userdata = "#!/bin/bash\n"
+ userdata += "set -x\n"
+ userdata += "set -e\n"
+ userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
+ userdata += "apt-get install curl\n"
+ userdata += ("echo \"rabbitmq_broker_ip=%s\" > ./config_file\n"
+ % floatip)
+ userdata += "echo \"mysql=no\" >> ./config_file\n"
+ userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3"
+ "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc"
+ "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv"
+ "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S"
+ "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O"
+ "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc"
+ "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya"
+ "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut"
+ "horized_keys\n")
+ userdata += "cat ./config_file\n"
+ userdata += ("curl -s http://get.openbaton.org/bootstrap "
+ "> ./bootstrap\n")
+ userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
+ bootstrap = "sh ./bootstrap release -configFile=./config_file"
+ userdata += bootstrap + "\n"
+
+ userdata += ("echo \"nfvo.plugin.timeout=300000\" >> "
+ "/etc/openbaton/openbaton-nfvo.properties\n")
+ userdata += "service openbaton-nfvo restart\n"
+ userdata += "service openbaton-vnfm-generic restart\n"
+
+ sg_id = os_utils.create_security_group_full(neutron_client,
+ "orchestra-sec-group",
+ "allowall")
+
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
+ "icmp", 0, 255)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
+ "icmp", 0, 255)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
+ "tcp", 1, 65535)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
+ "udp", 1, 65535)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
+ "tcp", 1, 65535)
+ os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
+ "udp", 1, 65535)
+
+ self.logger.info("Security group set")
+
+ self.logger.info("Create instance....")
+ self.logger.info("flavor: m1.medium\n"
+ "image: %s\n"
+ "network_id: %s\n"
+ "userdata: %s\n"
+ % (self.imagename, network_id, userdata))
+
+ instance = os_utils.create_instance_and_wait_for_active(
+ "m1.medium",
+ os_utils.get_image_id(glance_client, self.imagename),
+ network_id,
+ "orchestra-openbaton",
+ config_drive=False,
+ userdata=userdata)
+
+ self.ob_instance_id = instance.id
+
+ self.logger.info("Adding sec group to orchestra instance")
+ os_utils.add_secgroup_to_instance(nova_client,
+ self.ob_instance_id, sg_id)
+
+ self.logger.info("Associating floating ip: '%s' to VM '%s' "
+ % (floatip, "orchestra-openbaton"))
+ if not os_utils.add_floating_ip(nova_client, instance.id, floatip):
+ self.logger.error("Cannot associate floating IP to VM.")
+ self.step_failure("Cannot associate floating IP to VM.")
+
+ self.logger.info("Waiting for nfvo to be up and running...")
+ x = 0
+ while x < 100:
+ if servertest(floatip, "8080"):
+ break
+ else:
+ self.logger.debug("openbaton is not started yet")
+ time.sleep(5)
+ x += 1
+
+ if x == 100:
+ self.logger.error("Openbaton is not started correctly")
+ self.step_failure("Openbaton is not started correctly")
+
+ self.ob_ip = floatip
+ self.ob_password = "openbaton"
+ self.ob_username = "admin"
+ self.ob_https = False
+ self.ob_port = "8080"
+
self.logger.info("Deploy orchestrator: OK")
def deploy_vnf(self):
- # deploy the VNF
- # call openbaton to deploy the vIMS
+ self.logger.info("vIMS Deployment")
+
+ self.main_agent = MainAgent(nfvo_ip=self.ob_ip,
+ nfvo_port=self.ob_port,
+ https=self.ob_https,
+ version=1,
+ username=self.ob_username,
+ password=self.ob_password)
+
+ project_agent = self.main_agent.get_agent("project", self.ob_projectid)
+ for p in json.loads(project_agent.find()):
+ if p.get("name") == "default":
+ self.ob_projectid = p.get("id")
+ break
+
+ self.logger.debug("project id: %s" % self.ob_projectid)
+ if self.ob_projectid == "":
+ self.logger.error("Default project id was not found!")
+ self.step_failure("Default project id was not found!")
+
+ vim_json = {
+ "name": "vim-instance",
+ "authUrl": os_utils.get_credentials().get("auth_url"),
+ "tenant": os_utils.get_credentials().get("tenant_name"),
+ "username": os_utils.get_credentials().get("username"),
+ "password": os_utils.get_credentials().get("password"),
+ "keyPair": "opnfv",
+ # TODO change the keypair to correct value
+ # or upload a correct one or remove it
+ "securityGroups": [
+ "default",
+ "orchestra-sec-group"
+ ],
+ "type": "openstack",
+ "location": {
+ "name": "opnfv",
+ "latitude": "52.525876",
+ "longitude": "13.314400"
+ }
+ }
+
+ self.logger.debug("vim: %s" % vim_json)
+
+ self.main_agent.get_agent(
+ "vim",
+ project_id=self.ob_projectid).create(entity=json.dumps(vim_json))
+
+ market_agent = self.main_agent.get_agent("market",
+ project_id=self.ob_projectid)
+
+ nsd = {}
+ try:
+ self.logger.info("sending: %s" % self.market_link)
+ nsd = market_agent.create(entity=self.market_link)
+ self.logger.info("Onboarded nsd: " + nsd.get("name"))
+ except NfvoException as e:
+ self.step_failure(e.message)
+
+ nsr_agent = self.main_agent.get_agent("nsr",
+ project_id=self.ob_projectid)
+ nsd_id = nsd.get('id')
+ if nsd_id is None:
+ self.step_failure("NSD not onboarded correctly")
+
+ nsr = None
+ try:
+ nsr = nsr_agent.create(nsd_id)
+ except NfvoException as e:
+ self.step_failure(e.message)
+
+ if nsr is None:
+ self.step_failure("NSR not deployed correctly")
+
+ i = 0
+ self.logger.info("waiting NSR to go to active...")
+ while nsr.get("status") != 'ACTIVE':
+ i += 1
+ if i == 100:
+ self.step_failure("After %s sec the nsr did not go to active.."
+                                  % (5 * 100))
+ time.sleep(5)
+ nsr = json.loads(nsr_agent.find(nsr.get('id')))
+
+ deploy_vnf = {'status': "PASS", 'result': nsr}
+ self.ob_nsr_id = nsr.get("id")
self.logger.info("Deploy VNF: OK")
+ return deploy_vnf
def test_vnf(self):
# Adaptations probably needed
# code used for cloudify_ims
# ruby client on jumphost calling the vIMS on the SUT
- script = "source {0}venv_cloudify/bin/activate; "
- script += "cd {0}; "
- script += "cfy status | grep -Eo \"([0-9]{{1,3}}\.){{3}}[0-9]{{1,3}}\""
- cmd = "/bin/bash -c '" + script.format(self.data_dir) + "'"
+ return
- try:
- self.logger.debug("Trying to get clearwater manager IP ... ")
- mgr_ip = os.popen(cmd).read()
- mgr_ip = mgr_ip.splitlines()[0]
- except:
- self.step_failure("Unable to retrieve the IP of the "
- "cloudify manager server !")
-
- api_url = "http://" + mgr_ip + "/api/v2"
- dep_outputs = requests.get(api_url + "/deployments/" +
- self.vnf.deployment_name + "/outputs")
- dns_ip = dep_outputs.json()['outputs']['dns_ip']
- ellis_ip = dep_outputs.json()['outputs']['ellis_ip']
-
- ellis_url = "http://" + ellis_ip + "/"
- url = ellis_url + "accounts"
-
- params = {"password": "functest",
- "full_name": "opnfv functest user",
- "email": "functest@opnfv.fr",
- "signup_code": "secret"}
-
- rq = requests.post(url, data=params)
- i = 20
- while rq.status_code != 201 and i > 0:
- rq = requests.post(url, data=params)
- i = i - 1
- time.sleep(10)
-
- if rq.status_code == 201:
- url = ellis_url + "session"
- rq = requests.post(url, data=params)
- cookies = rq.cookies
-
- url = ellis_url + "accounts/" + params['email'] + "/numbers"
- if cookies != "":
- rq = requests.post(url, cookies=cookies)
- i = 24
- while rq.status_code != 200 and i > 0:
- rq = requests.post(url, cookies=cookies)
- i = i - 1
- time.sleep(25)
-
- if rq.status_code != 200:
- self.step_failure("Unable to create a number: %s"
- % rq.json()['reason'])
-
- nameservers = ft_utils.get_resolvconf_ns()
- resolvconf = ""
- for ns in nameservers:
- resolvconf += "\nnameserver " + ns
-
- if dns_ip != "":
- script = ('echo -e "nameserver ' + dns_ip + resolvconf +
- '" > /etc/resolv.conf; ')
- script += 'source /etc/profile.d/rvm.sh; '
- script += 'cd {0}; '
- script += ('rake test[{1}] SIGNUP_CODE="secret"')
-
- cmd = ("/bin/bash -c '" +
- script.format(self.data_dir, self.inputs["public_domain"]) +
- "'")
- output_file = "output.txt"
- f = open(output_file, 'w+')
- subprocess.call(cmd, shell=True, stdout=f,
- stderr=subprocess.STDOUT)
- f.close()
-
- f = open(output_file, 'r')
- result = f.read()
- if result != "":
- self.logger.debug(result)
-
- vims_test_result = ""
- tempFile = os.path.join(self.test_dir, "temp.json")
- try:
- self.logger.debug("Trying to load test results")
- with open(tempFile) as f:
- vims_test_result = json.load(f)
- f.close()
- except:
- self.logger.error("Unable to retrieve test results")
+ def clean(self):
+ self.main_agent.get_agent(
+ "nsr",
+ project_id=self.ob_projectid).delete(self.ob_nsr_id)
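+        # Give the NFVO a few seconds to tear down the NSR before
+        # deleting the Open Baton VM.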
+ time.sleep(5)
+ os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
+ instance_id=self.ob_instance_id)
+        # TODO: does the clean also remove the VM?
+        # I think so, since it is going to remove the tenant...
+ super(ImsVnf, self).clean()
- try:
- os.remove(tempFile)
- except:
- self.logger.error("Deleting file failed")
+ def main(self, **kwargs):
+ self.logger.info("Orchestra IMS VNF onboarding test starting")
+ self.execute()
+ self.logger.info("Orchestra IMS VNF onboarding test executed")
+        if self.criteria == "PASS":
+ return self.EX_OK
+ else:
+ return self.EX_RUN_ERROR
- if vims_test_result != '':
- return {'status': 'PASS', 'result': vims_test_result}
- else:
- return {'status': 'FAIL', 'result': ''}
+ def run(self):
+ kwargs = {}
+ return self.main(**kwargs)
- def clean(self):
- # TODO
- super(ImsVnf, self).clean()
+
+if __name__ == '__main__':
+ test = ImsVnf()
+ test.deploy_orchestrator()
+ test.deploy_vnf()
+ test.clean()
+
+
+# ----------------------------------------------------------
+#
+# UTILS
+#
+# -----------------------------------------------------------
+def get_config(parameter, file):
+ """
+    Returns the value of a given parameter from the given YAML file.
+    The parameter must be given as a dotted string.
+ Example: general.openstack.image_name
+ """
+ with open(file) as f:
+ file_yaml = yaml.safe_load(f)
+ f.close()
+ value = file_yaml
+ for element in parameter.split("."):
+ value = value.get(element)
+ if value is None:
+            raise ValueError("The parameter %s is not defined in"
+                             " %s" % (parameter, file))
+ return value
+
+
+def download_and_add_image_on_glance(glance, image_name,
+ image_url, data_dir):
+ dest_path = data_dir
+ if not os.path.exists(dest_path):
+ os.makedirs(dest_path)
+ file_name = image_url.rsplit('/')[-1]
+ if not ft_utils.download_url(image_url, dest_path):
+ return False
+ image = os_utils.create_glance_image(
+ glance, image_name, dest_path + file_name)
+ if not image:
+ return False
+ return image
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml b/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml
new file mode 100644
index 00000000..2fb33df5
--- /dev/null
+++ b/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml
@@ -0,0 +1,7 @@
+tenant_images:
+ ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ openims: http://marketplace.openbaton.org:8082/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img
+openbaton:
+ bootstrap: sh <(curl -s http://get.openbaton.org/bootstrap) release -configFile=
+ marketplace_link: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/OpenImsCore/3.2.0/json
+ imagename: ubuntu_14.04
diff --git a/functest/opnfv_tests/vnf/ims/orchestrator_cloudify.py b/functest/opnfv_tests/vnf/ims/orchestrator_cloudify.py
index f3838f87..82a9dca0 100644
--- a/functest/opnfv_tests/vnf/ims/orchestrator_cloudify.py
+++ b/functest/opnfv_tests/vnf/ims/orchestrator_cloudify.py
@@ -21,7 +21,7 @@ from git import Repo
import functest.utils.functest_logger as ft_logger
-class Orchestrator:
+class Orchestrator(object):
def __init__(self, testcase_dir, inputs={}):
self.testcase_dir = testcase_dir
@@ -114,6 +114,7 @@ class Orchestrator:
cmd = "/bin/bash -c '" + script + "'"
error = execute_command(cmd, self.logger)
if error:
+ self.logger.error("Failed to deploy cloudify-manager")
return error
self.logger.info("Cloudify-manager server is UP !")
@@ -171,6 +172,7 @@ class Orchestrator:
cmd = "/bin/bash -c '" + script + "'"
error = execute_command(cmd, self.logger, 2000)
if error:
+ self.logger.error("Failed to deploy blueprint")
return error
self.logger.info("The deployment of {0} is ended".format(dep_name))
@@ -228,7 +230,5 @@ def execute_command(cmd, logger, timeout=1800):
logger.error("Error when executing command %s" % cmd)
f = open(output_file, 'r')
lines = f.readlines()
- result = lines[len(lines) - 3]
- result += lines[len(lines) - 2]
- result += lines[len(lines) - 1]
- return result
+ return lines[-5:]
diff --git a/functest/opnfv_tests/vnf/router/__init__.py b/functest/opnfv_tests/vnf/router/__init__.py
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/functest/opnfv_tests/vnf/router/__init__.py
diff --git a/functest/opnfv_tests/vnf/router/vyos_vrouter.py b/functest/opnfv_tests/vnf/router/vyos_vrouter.py
new file mode 100755
index 00000000..94a3ecfd
--- /dev/null
+++ b/functest/opnfv_tests/vnf/router/vyos_vrouter.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 Okinawa Open Laboratory
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+import functest.core.feature_base as base
+import json
+import os
+
+RESULT_DETAILS_FILE = "test_result.json"
+
+
+class VrouterVnf(base.FeatureBase):
+ def __init__(self):
+ super(VrouterVnf, self).__init__(project='vRouter',
+ case='vyos_vrouter',
+ repo='dir_repo_vrouter')
+ self.cmd = 'cd %s && ./run.sh' % self.repo
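+        # run.sh executes the vRouter test suite; detailed results are
+        # read back from test_result.json by set_result_details().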
+
+ def set_result_details(self):
+ filepath = os.path.join(self.repo, RESULT_DETAILS_FILE)
+ if os.path.exists(filepath):
+ f = open(filepath, 'r')
+ self.details = json.load(f)
+ f.close()
+
+ def log_results(self):
+ if self.criteria == 'PASS':
+ self.set_result_details()
+ super(VrouterVnf, self).log_results()
diff --git a/functest/tests/unit/opnfv_tests/vnf/__init__.py b/functest/tests/unit/opnfv_tests/vnf/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/functest/tests/unit/opnfv_tests/vnf/__init__.py
diff --git a/functest/tests/unit/opnfv_tests/vnf/ims/__init__.py b/functest/tests/unit/opnfv_tests/vnf/ims/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/functest/tests/unit/opnfv_tests/vnf/ims/__init__.py
diff --git a/functest/tests/unit/opnfv_tests/vnf/ims/test_clearwater.py b/functest/tests/unit/opnfv_tests/vnf/ims/test_clearwater.py
new file mode 100644
index 00000000..527f12e5
--- /dev/null
+++ b/functest/tests/unit/opnfv_tests/vnf/ims/test_clearwater.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import logging
+import unittest
+
+import mock
+
+from functest.opnfv_tests.vnf.ims import clearwater
+from functest.opnfv_tests.vnf.ims import orchestrator_cloudify
+
+
+class ClearwaterTesting(unittest.TestCase):
+
+ logging.disable(logging.CRITICAL)
+
+ def setUp(self):
+ self.clearwater = clearwater.Clearwater()
+ self.orchestrator = orchestrator_cloudify.Orchestrator('test_dir')
+ self.clearwater.orchestrator = self.orchestrator
+ self.clearwater.dep_name = 'test_dep_name'
+ self.bp = {'file_name': 'test_file',
+ 'destination_folder': 'test_folder',
+ 'url': 'test_url',
+ 'branch': 'test_branch'}
+
+ def test_deploy_vnf_blueprint_download_failed(self):
+ with mock.patch.object(self.clearwater.orchestrator,
+ 'download_upload_and_deploy_blueprint',
+ return_value='error'):
+ self.assertEqual(self.clearwater.deploy_vnf(self.bp),
+ 'error')
+
+ def test_deploy_vnf_blueprint_download_passed(self):
+ with mock.patch.object(self.clearwater.orchestrator,
+ 'download_upload_and_deploy_blueprint',
+ return_value=''):
+            self.clearwater.deploy_vnf(self.bp)
+ self.assertEqual(self.clearwater.deploy, True)
+
+ def test_undeploy_vnf_deployment_passed(self):
+ with mock.patch.object(self.clearwater.orchestrator,
+ 'undeploy_deployment'):
+ self.clearwater.deploy = True
+            self.clearwater.undeploy_vnf()
+ self.assertEqual(self.clearwater.deploy, False)
+
+
+if __name__ == "__main__":
+ unittest.main(verbosity=2)
diff --git a/functest/tests/unit/opnfv_tests/vnf/ims/test_cloudify_ims.py b/functest/tests/unit/opnfv_tests/vnf/ims/test_cloudify_ims.py
new file mode 100644
index 00000000..e25816f0
--- /dev/null
+++ b/functest/tests/unit/opnfv_tests/vnf/ims/test_cloudify_ims.py
@@ -0,0 +1,542 @@
+#!/usr/bin/env python
+
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import logging
+import unittest
+
+import mock
+
+from functest.opnfv_tests.vnf.ims import cloudify_ims
+
+
+class ImsVnfTesting(unittest.TestCase):
+
+ logging.disable(logging.CRITICAL)
+
+ def setUp(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os.makedirs'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'get_config', return_value='config_value'):
+ self.ims_vnf = cloudify_ims.ImsVnf()
+ self.neutron_client = mock.Mock()
+ self.glance_client = mock.Mock()
+ self.keystone_client = mock.Mock()
+ self.nova_client = mock.Mock()
+ self.orchestrator = {'requirements': {'ram_min': 2,
+ 'os_image': 'test_os_image'},
+ 'blueprint': {'url': 'test_url',
+ 'branch': 'test_branch'},
+ 'inputs': {'public_domain': 'test_domain'},
+ 'object': 'test_object',
+ 'deployment_name': 'test_deployment_name'}
+ self.ims_vnf.orchestrator = self.orchestrator
+ self.ims_vnf.images = {'test_image': 'test_url'}
+ self.ims_vnf.vnf = self.orchestrator
+ self.ims_vnf.tenant_name = 'test_tenant'
+ self.ims_vnf.inputs = {'public_domain': 'test_domain'}
+ self.ims_vnf.glance_client = self.glance_client
+ self.ims_vnf.neutron_client = self.neutron_client
+ self.ims_vnf.keystone_client = self.keystone_client
+ self.ims_vnf.nova_client = self.nova_client
+ self.ims_vnf.admin_creds = 'test_creds'
+
+ self.mock_post = mock.Mock()
+ attrs = {'status_code': 201,
+ 'cookies': ""}
+ self.mock_post.configure_mock(**attrs)
+
+ self.mock_post_200 = mock.Mock()
+ attrs = {'status_code': 200,
+ 'cookies': ""}
+ self.mock_post_200.configure_mock(**attrs)
+
+ def test_deploy_orchestrator_missing_image(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_neutron_client',
+ return_value=self.neutron_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_glance_client',
+ return_value=self.glance_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_keystone_client',
+ return_value=self.keystone_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_nova_client',
+ return_value=self.nova_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ return_value=''), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'download_and_add_image_on_glance') as m, \
+ self.assertRaises(Exception) as context:
+ self.ims_vnf.deploy_orchestrator()
+ self.assertTrue(m.called)
+ msg = "Failed to find or upload required OS "
+ msg += "image for this deployment"
+ self.assertTrue(msg in context.exception)
+
+ def test_deploy_orchestrator_extend_quota_fail(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_neutron_client',
+ return_value=self.neutron_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_glance_client',
+ return_value=self.glance_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_keystone_client',
+ return_value=self.keystone_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_nova_client',
+ return_value=self.nova_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ return_value='image_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_tenant_id',
+ return_value='tenant_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.update_sg_quota',
+ return_value=False), \
+ self.assertRaises(Exception) as context:
+ self.ims_vnf.deploy_orchestrator()
+ msg = "Failed to update security group quota"
+ msg += " for tenant test_tenant"
+ self.assertTrue(msg in context.exception)
+
+ def _get_image_id(self, client, name):
+ if name == 'test_image':
+ return 'image_id'
+ else:
+ return ''
+
+ def test_deploy_orchestrator_missing_flavor(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_neutron_client',
+ return_value=self.neutron_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_glance_client',
+ return_value=self.glance_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_keystone_client',
+ return_value=self.keystone_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_nova_client',
+ return_value=self.nova_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ side_effect=self._get_image_id), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_tenant_id',
+ return_value='tenant_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.update_sg_quota',
+ return_value=True), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_endpoint',
+ return_value='public_auth_url'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Orchestrator', return_value=mock.Mock()) as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(False, '')), \
+ self.assertRaises(Exception) as context:
+ self.ims_vnf.deploy_orchestrator()
+ self.assertTrue(m.set_credentials.called)
+ msg = "Failed to find required flavorfor this deployment"
+ self.assertTrue(msg in context.exception)
+
+ def test_deploy_orchestrator_missing_os_image(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_neutron_client',
+ return_value=self.neutron_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_glance_client',
+ return_value=self.glance_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_keystone_client',
+ return_value=self.keystone_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_nova_client',
+ return_value=self.nova_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ side_effect=self._get_image_id), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_tenant_id',
+ return_value='tenant_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.update_sg_quota',
+ return_value=True), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_endpoint',
+ return_value='public_auth_url'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Orchestrator', return_value=mock.Mock()) as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(True, 'flavor_id')), \
+ self.assertRaises(Exception) as context:
+ self.ims_vnf.deploy_orchestrator()
+ self.assertTrue(m.set_credentials.called)
+ self.assertTrue(m.set_flavor_id.called)
+ msg = "Failed to find required OS image for cloudify manager"
+ self.assertTrue(msg in context.exception)
+
+ def test_deploy_orchestrator_get_ext_network_fail(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_neutron_client',
+ return_value=self.neutron_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_glance_client',
+ return_value=self.glance_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_keystone_client',
+ return_value=self.keystone_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_nova_client',
+ return_value=self.nova_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ return_value='image_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_tenant_id',
+ return_value='tenant_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.update_sg_quota',
+ return_value=True), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_endpoint',
+ return_value='public_auth_url'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Orchestrator', return_value=mock.Mock()) as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(True, 'flavor_id')), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_external_net',
+ return_value=''), \
+ self.assertRaises(Exception) as context:
+ self.ims_vnf.deploy_orchestrator()
+ self.assertTrue(m.set_credentials.called)
+ self.assertTrue(m.set_flavor_id.called)
+ self.assertTrue(m.set_image_id.called)
+ msg = "Failed to get external network"
+ self.assertTrue(msg in context.exception)
+
+ def test_deploy_orchestrator_with_error(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_neutron_client',
+ return_value=self.neutron_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_glance_client',
+ return_value=self.glance_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_keystone_client',
+ return_value=self.keystone_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_nova_client',
+ return_value=self.nova_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ return_value='image_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_tenant_id',
+ return_value='tenant_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.update_sg_quota',
+ return_value=True), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_endpoint',
+ return_value='public_auth_url'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Orchestrator') as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(True, 'flavor_id')), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_external_net',
+ return_value='ext_net'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'ft_utils.get_resolvconf_ns',
+ return_value=True), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'ft_utils.execute_command'):
+ mock_obj = mock.Mock()
+ attrs = {'deploy_manager.return_value': 'error'}
+ mock_obj.configure_mock(**attrs)
+
+ m.return_value = mock_obj
+
+ self.assertEqual(self.ims_vnf.deploy_orchestrator(),
+ {'status': 'FAIL', 'result': 'error'})
+
+ def test_deploy_orchestrator_default(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_neutron_client',
+ return_value=self.neutron_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_glance_client',
+ return_value=self.glance_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_keystone_client',
+ return_value=self.keystone_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_nova_client',
+ return_value=self.nova_client), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ return_value='image_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_tenant_id',
+ return_value='tenant_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.update_sg_quota',
+ return_value=True), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_endpoint',
+ return_value='public_auth_url'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Orchestrator') as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(True, 'flavor_id')), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_external_net',
+ return_value='ext_net'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'ft_utils.get_resolvconf_ns',
+ return_value=True), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'ft_utils.execute_command'):
+ mock_obj = mock.Mock()
+ attrs = {'deploy_manager.return_value': ''}
+ mock_obj.configure_mock(**attrs)
+
+ m.return_value = mock_obj
+
+ self.assertEqual(self.ims_vnf.deploy_orchestrator(),
+ {'status': 'PASS', 'result': ''})
+
+ def test_deploy_vnf_missing_flavor(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Clearwater', return_value=mock.Mock()), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(False, '')), \
+ self.assertRaises(Exception) as context:
+ self.ims_vnf.deploy_vnf()
+ msg = "Failed to find required flavor for this deployment"
+ self.assertTrue(msg in context.exception)
+
+ def test_deploy_vnf_missing_os_image(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Clearwater', return_value=mock.Mock()) as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(True, 'test_flavor')), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ return_value=''), \
+ self.assertRaises(Exception) as context:
+ self.ims_vnf.deploy_vnf()
+ msg = "Failed to find required OS image"
+ msg += " for clearwater VMs"
+ self.assertTrue(msg in context.exception)
+ self.assertTrue(m.set_flavor_id.called)
+
+ def test_deploy_vnf_missing_get_ext_net(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Clearwater', return_value=mock.Mock()) as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(True, 'test_flavor')), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ return_value='image_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_external_net',
+ return_value=''), \
+ self.assertRaises(Exception) as context:
+ self.ims_vnf.deploy_vnf()
+ msg = "Failed to get external network"
+ self.assertTrue(msg in context.exception)
+ self.assertTrue(m.set_flavor_id.called)
+ self.assertTrue(m.set_image_id.called)
+
+ def test_deploy_vnf_with_error(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Clearwater') as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(True, 'test_flavor')), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ return_value='image_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_external_net',
+ return_value='ext_net'):
+ mock_obj = mock.Mock()
+ attrs = {'deploy_vnf.return_value': 'error'}
+ mock_obj.configure_mock(**attrs)
+
+ m.return_value = mock_obj
+
+ self.assertEqual(self.ims_vnf.deploy_vnf(),
+ {'status': 'FAIL', 'result': 'error'})
+
+ def test_deploy_vnf_default(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'Clearwater') as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_or_create_flavor',
+ return_value=(True, 'test_flavor')), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_image_id',
+ return_value='image_id'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.get_external_net',
+ return_value='ext_net'):
+ mock_obj = mock.Mock()
+ attrs = {'deploy_vnf.return_value': ''}
+ mock_obj.configure_mock(**attrs)
+
+ m.return_value = mock_obj
+
+ self.assertEqual(self.ims_vnf.deploy_vnf(),
+ {'status': 'PASS', 'result': ''})
+
+ def test_test_vnf_ip_retrieval_failure(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os.popen', side_effect=Exception), \
+ self.assertRaises(Exception) as context:
+ msg = "Unable to retrieve the IP of the "
+ msg += "cloudify manager server !"
+ self.ims_vnf.test_vnf()
+ self.assertTrue(msg in context.exception)
+
+ def test_test_vnf_create_number_failure(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os.popen') as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'requests.get'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'requests.post',
+ return_value=self.mock_post), \
+ self.assertRaises(Exception) as context:
+ mock_obj = mock.Mock()
+ attrs = {'read.return_value': 'test_ip\n'}
+ mock_obj.configure_mock(**attrs)
+ m.return_value = mock_obj
+
+ self.ims_vnf.test_vnf()
+
+ msg = "Unable to create a number:"
+ self.assertTrue(msg in context.exception)
+
+ def _get_post_status(self, url, cookies='', data=''):
+ ellis_url = "http://test_ellis_ip/session"
+ if url == ellis_url:
+ return self.mock_post_200
+ return self.mock_post
+
+ def test_test_vnf_fail(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os.popen') as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'requests.get') as mock_get, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'requests.post',
+ side_effect=self._get_post_status), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'ft_utils.get_resolvconf_ns'), \
+ mock.patch('__builtin__.open', mock.mock_open()), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'subprocess.call'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os.remove'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'json.load', return_value=''):
+ mock_obj = mock.Mock()
+ attrs = {'read.return_value': 'test_ip\n'}
+ mock_obj.configure_mock(**attrs)
+ m.return_value = mock_obj
+
+ mock_obj2 = mock.Mock()
+ attrs = {'json.return_value': {'outputs':
+ {'dns_ip': 'test_dns_ip',
+ 'ellis_ip': 'test_ellis_ip'}}}
+ mock_obj2.configure_mock(**attrs)
+ mock_get.return_value = mock_obj2
+
+ self.assertEqual(self.ims_vnf.test_vnf(),
+ {'status': 'FAIL', 'result': ''})
+
+ def test_test_vnf_pass(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os.popen') as m, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'requests.get') as mock_get, \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'requests.post',
+ side_effect=self._get_post_status), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'ft_utils.get_resolvconf_ns'), \
+ mock.patch('__builtin__.open', mock.mock_open()), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'subprocess.call'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os.remove'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'json.load', return_value='vims_test_result'):
+ mock_obj = mock.Mock()
+ attrs = {'read.return_value': 'test_ip\n'}
+ mock_obj.configure_mock(**attrs)
+ m.return_value = mock_obj
+
+ mock_obj2 = mock.Mock()
+ attrs = {'json.return_value': {'outputs':
+ {'dns_ip': 'test_dns_ip',
+ 'ellis_ip': 'test_ellis_ip'}}}
+ mock_obj2.configure_mock(**attrs)
+ mock_get.return_value = mock_obj2
+
+ self.assertEqual(self.ims_vnf.test_vnf(),
+ {'status': 'PASS', 'result': 'vims_test_result'})
+
+ def test_download_and_add_image_on_glance_incorrect_url(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os.makedirs'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'ft_utils.download_url',
+ return_value=False):
+            resp = cloudify_ims.download_and_add_image_on_glance(
+                self.glance_client,
+                'image_name',
+                'http://url',
+                'data_dir')
+ self.assertEqual(resp, False)
+
+ def test_download_and_add_image_on_glance_image_creation_failure(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os.makedirs'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'ft_utils.download_url',
+ return_value=True), \
+ mock.patch('functest.opnfv_tests.vnf.ims.cloudify_ims.'
+ 'os_utils.create_glance_image',
+ return_value=''):
+            resp = cloudify_ims.download_and_add_image_on_glance(
+                self.glance_client,
+                'image_name',
+                'http://url',
+                'data_dir')
+ self.assertEqual(resp, False)
+
+
+if __name__ == "__main__":
+ unittest.main(verbosity=2)
diff --git a/functest/tests/unit/opnfv_tests/vnf/ims/test_orchestrator_cloudify.py b/functest/tests/unit/opnfv_tests/vnf/ims/test_orchestrator_cloudify.py
new file mode 100644
index 00000000..620b0216
--- /dev/null
+++ b/functest/tests/unit/opnfv_tests/vnf/ims/test_orchestrator_cloudify.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import logging
+import unittest
+
+import mock
+
+from functest.opnfv_tests.vnf.ims import orchestrator_cloudify
+
+
+class ImsVnfTesting(unittest.TestCase):
+
+ logging.disable(logging.CRITICAL)
+
+ def setUp(self):
+ self.orchestrator = orchestrator_cloudify.Orchestrator('test_dir')
+ self.bp = {'file_name': 'test_file',
+ 'destination_folder': 'test_folder',
+ 'url': 'test_url',
+ 'branch': 'test_branch'}
+
+ def test_download_manager_blueprint_download_blueprint_failed(self):
+ self.orchestrator.manager_blueprint = False
+ with mock.patch.object(self.orchestrator, '_download_blueprints',
+ return_value=False), \
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'exit') as mock_exit:
+ self.orchestrator.download_manager_blueprint('test_url',
+ 'test_branch')
+ mock_exit.assert_any_call(-1)
+
+ def test_download_manager_blueprint_download_blueprint_passed(self):
+ self.orchestrator.manager_blueprint = False
+ with mock.patch.object(self.orchestrator, '_download_blueprints',
+ return_value=True):
+ self.orchestrator.download_manager_blueprint('test_url',
+ 'test_branch')
+ self.assertEqual(self.orchestrator.manager_blueprint,
+ True)
+
+ def test_deploy_manager_failed(self):
+ self.orchestrator.manager_blueprint = True
+ with mock.patch('__builtin__.open', mock.mock_open()), \
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'os.remove'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'execute_command', return_value='error'):
+ self.assertEqual(self.orchestrator.deploy_manager(),
+ 'error')
+ self.assertEqual(self.orchestrator.manager_up,
+ False)
+
+ def test_deploy_manager_passed(self):
+ self.orchestrator.manager_blueprint = True
+ with mock.patch('__builtin__.open', mock.mock_open()), \
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'os.remove'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'execute_command', return_value=''):
+ self.orchestrator.deploy_manager()
+ self.assertEqual(self.orchestrator.manager_up,
+ True)
+
+ def test_undeploy_manager_passed(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'execute_command', return_value=''):
+ self.orchestrator.deploy_manager()
+ self.assertEqual(self.orchestrator.manager_up,
+ False)
+
+ def test_dwnld_upload_and_depl_blueprint_dwnld_blueprint_failed(self):
+ with mock.patch.object(self.orchestrator, '_download_blueprints',
+ return_value=False), \
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'exit', side_effect=Exception) as mock_exit, \
+ self.assertRaises(Exception):
+ self.orchestrator.download_upload_and_deploy_blueprint(self.bp,
+ 'cfig',
+ 'bpn',
+ 'dpn')
+ mock_exit.assert_any_call(-1)
+
+ def test_dwnld_upload_and_depl_blueprint_failed(self):
+ with mock.patch.object(self.orchestrator, '_download_blueprints',
+ return_value=True), \
+ mock.patch('__builtin__.open', mock.mock_open()), \
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'execute_command', return_value='error'):
+ r = self.orchestrator.download_upload_and_deploy_blueprint(self.bp,
+ 'cfig',
+ 'bpn',
+ 'dpn')
+ self.assertEqual(r, 'error')
+
+ def test__download_blueprints_failed(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'shutil.rmtree'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'Repo.clone_from', side_effect=Exception):
+ self.assertEqual(self.orchestrator._download_blueprints('bp_url',
+ 'branch',
+ 'dest'),
+ False)
+
+ def test__download_blueprints_passed(self):
+ with mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'shutil.rmtree'), \
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestrator_cloudify.'
+ 'Repo.clone_from'):
+ self.assertEqual(self.orchestrator._download_blueprints('bp_url',
+ 'branch',
+ 'dest'),
+ True)
+
+
+if __name__ == "__main__":
+ unittest.main(verbosity=2)
diff --git a/functest/tests/unit/utils/test_openstack_utils.py b/functest/tests/unit/utils/test_openstack_utils.py
index 447271fc..ef3764cc 100644
--- a/functest/tests/unit/utils/test_openstack_utils.py
+++ b/functest/tests/unit/utils/test_openstack_utils.py
@@ -104,7 +104,6 @@ class OSUtilsTesting(unittest.TestCase):
'servers.create.return_value': self.instance,
'flavors.list.return_value': [self.flavor],
'flavors.find.return_value': self.flavor,
- 'flavors.list.return_value': [self.flavor],
'servers.add_floating_ip.return_value': mock.Mock(),
'servers.force_delete.return_value': mock.Mock(),
'aggregates.list.return_value': [self.aggregate],
@@ -162,6 +161,15 @@ class OSUtilsTesting(unittest.TestCase):
}
self.cinder_client.configure_mock(**attrs)
+ self.resource = mock.Mock()
+ attrs = {'id': 'resource_test_id',
+                 'name': 'resource_test_name'}
+        self.resource.configure_mock(**attrs)
+
+ self.heat_client = mock.Mock()
+ attrs = {'resources.get.return_value': self.resource}
+ self.heat_client.configure_mock(**attrs)
+
mock_obj = mock.Mock()
attrs = {'id': 'tenant_id',
'name': 'test_tenant'}
@@ -543,6 +551,36 @@ class OSUtilsTesting(unittest.TestCase):
mock_glan_client.assert_called_once_with('3',
session=mock_session_obj)
+ @mock.patch('functest.utils.openstack_utils.os.getenv',
+ return_value=None)
+ def test_get_heat_client_version_missing_env(self, mock_os_getenv):
+ self.assertEqual(openstack_utils.get_heat_client_version(),
+ openstack_utils.DEFAULT_HEAT_API_VERSION)
+
+ @mock.patch('functest.utils.openstack_utils.logger.info')
+ @mock.patch('functest.utils.openstack_utils.os.getenv', return_value='1')
+ def test_get_heat_client_version_default(self, mock_os_getenv,
+ mock_logger_info):
+ self.assertEqual(openstack_utils.get_heat_client_version(), '1')
+ mock_logger_info.assert_called_once_with(
+ "OS_ORCHESTRATION_API_VERSION is set in env as '%s'", '1')
+
+ def test_get_heat_client(self):
+ mock_heat_obj = mock.Mock()
+ mock_session_obj = mock.Mock()
+ with mock.patch('functest.utils.openstack_utils'
+ '.get_heat_client_version', return_value='1'), \
+ mock.patch('functest.utils.openstack_utils'
+ '.heatclient.Client',
+ return_value=mock_heat_obj) \
+ as mock_heat_client, \
+ mock.patch('functest.utils.openstack_utils.get_session',
+ return_value=mock_session_obj):
+ self.assertEqual(openstack_utils.get_heat_client(),
+ mock_heat_obj)
+ mock_heat_client.assert_called_once_with('1',
+ session=mock_session_obj)
+
def test_get_instances_default(self):
self.assertEqual(openstack_utils.get_instances(self.nova_client),
[self.instance])
@@ -1700,6 +1738,24 @@ class OSUtilsTesting(unittest.TestCase):
'user_id'))
self.assertTrue(mock_logger_error.called)
+ def test_get_resource_default(self):
+ with mock.patch('functest.utils.openstack_utils.'
+ 'is_keystone_v3', return_value=True):
+ self.assertEqual(openstack_utils.
+ get_resource(self.heat_client,
+ 'stack_id',
+ 'resource'),
+ self.resource)
+
+ @mock.patch('functest.utils.openstack_utils.logger.error')
+ def test_get_resource_exception(self, mock_logger_error):
+ self.assertEqual(openstack_utils.
+ get_resource(Exception,
+ 'stack_id',
+ 'resource'),
+ None)
+ self.assertTrue(mock_logger_error.called)
+
if __name__ == "__main__":
unittest.main(verbosity=2)
diff --git a/functest/utils/config.py b/functest/utils/config.py
index 84166c1d..b5b84501 100644..100755
--- a/functest/utils/config.py
+++ b/functest/utils/config.py
@@ -2,26 +2,25 @@ import os
import yaml
+import env
+
class Config(object):
def __init__(self):
- if 'CONFIG_FUNCTEST_YAML' not in os.environ:
- raise Exception('CONFIG_FUNCTEST_YAML not configed')
- self.config_functest = os.environ['CONFIG_FUNCTEST_YAML']
try:
- with open(self.config_functest) as f:
+ with open(env.ENV.CONFIG_FUNCTEST_YAML) as f:
self.functest_yaml = yaml.safe_load(f)
self._parse(None, self.functest_yaml)
- except:
- raise Exception('Parse {} failed'.format(self.config_functest))
+ except Exception as error:
+ raise Exception('Parse config failed: {}'.format(str(error)))
self._set_others()
def _parse(self, attr_now, left_parametes):
for param_n, param_v in left_parametes.iteritems():
attr_further = self._get_attr_further(attr_now, param_n)
- if not isinstance(param_v, dict):
+ if attr_further:
self.__setattr__(attr_further, param_v)
- else:
+ if isinstance(param_v, dict):
self._parse(attr_further, param_v)
def _get_attr_further(self, attr_now, next):
@@ -33,3 +32,8 @@ class Config(object):
CONF = Config()
+
+if __name__ == "__main__":
+ print CONF.vnf_cloudify_ims
+ print CONF.vnf_cloudify_ims_tenant_images
+ print CONF.vnf_cloudify_ims_tenant_images_centos_7
diff --git a/functest/utils/env.py b/functest/utils/env.py
index fa5245fb..7e4df2ea 100644
--- a/functest/utils/env.py
+++ b/functest/utils/env.py
@@ -3,7 +3,7 @@ import re
default_envs = {
'NODE_NAME': 'unknown_pod',
- 'CI_DEBUG': 'true',
+ 'CI_DEBUG': 'false',
'DEPLOY_SCENARIO': 'os-nosdn-nofeature-noha',
'DEPLOY_TYPE': 'virt',
'INSTALLER_TYPE': None,
diff --git a/functest/utils/functest_logger.py b/functest/utils/functest_logger.py
index 0cba8c52..022211cb 100755
--- a/functest/utils/functest_logger.py
+++ b/functest/utils/functest_logger.py
@@ -29,10 +29,12 @@ import json
from functest.utils.constants import CONST
-class Logger:
+class Logger(object):
+
def __init__(self, logger_name):
self.setup_logging()
self.logger = logging.getLogger(logger_name)
+ logging.getLogger("paramiko").setLevel(logging.WARNING)
def getLogger(self):
return self.logger
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index b2c36cff..dbed811a 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -291,6 +291,15 @@ def get_ci_envvars():
return ci_env_var
+def execute_command_raise(cmd, info=False, error_msg="",
+ verbose=True, output_file=None):
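+    # Same as execute_command(), but raises an Exception carrying error_msg
+    # instead of returning a non-zero exit code.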
+ ret = execute_command(cmd, info, error_msg, verbose, output_file)
+ if ret != 0:
+ raise Exception(error_msg)
+
+
def execute_command(cmd, info=False, error_msg="",
verbose=True, output_file=None):
if not error_msg:
@@ -362,7 +369,7 @@ def get_parameter_from_yaml(parameter, file):
value = value.get(element)
if value is None:
raise ValueError("The parameter %s is not defined in"
- " config_functest.yaml" % parameter)
+ " %s" % (parameter, file))
return value
diff --git a/functest/utils/openstack_tacker.py b/functest/utils/openstack_tacker.py
index f3597965..8327fdbe 100644
--- a/functest/utils/openstack_tacker.py
+++ b/functest/utils/openstack_tacker.py
@@ -45,8 +45,18 @@ def get_vnfd_id(tacker_client, vnfd_name):
return get_id_from_name(tacker_client, 'vnfd', vnfd_name)
-def get_vnf_id(tacker_client, vnf_name):
- return get_id_from_name(tacker_client, 'vnf', vnf_name)
+def get_vnf_id(tacker_client, vnf_name, timeout=5):
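+    # Retry the name-to-ID lookup once per second until it resolves or
+    # the timeout (in seconds) expires.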
+ vnf_id = None
+ while vnf_id is None and timeout >= 0:
+ vnf_id = get_id_from_name(tacker_client, 'vnf', vnf_name)
+ if vnf_id is None:
+ logger.info("Could not retrieve ID for vnf with name [%s]."
+ " Retrying." % vnf_name)
+ time.sleep(1)
+ timeout -= 1
+ return vnf_id
def get_sfc_id(tacker_client, sfc_name):
@@ -118,6 +126,7 @@ def create_vnf(tacker_client, vnf_name, vnfd_id=None,
}
}
if param_file is not None:
+ params = None
with open(param_file) as f:
params = f.read()
vnf_body['vnf']['attributes']['param_values'] = params
@@ -135,32 +144,44 @@ def create_vnf(tacker_client, vnf_name, vnfd_id=None,
return None
-def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None):
+def get_vnf(tacker_client, vnf_id=None, vnf_name=None):
try:
- _id = None
- if vnf_id is not None:
- _id = vnf_id
- elif vnf_name is not None:
- while _id is None:
- try:
- _id = get_vnf_id(tacker_client, vnf_name)
- except:
- logger.error("Bazinga")
- else:
+ if vnf_id is None and vnf_name is None:
raise Exception('You must specify vnf_id or vnf_name')
- while True:
- vnf = [v for v in list_vnfs(tacker_client, verbose=True)['vnfs']
- if v['id'] == _id]
- vnf = vnf[0]
- logger.info('Waiting for vnf {0}'.format(str(vnf)))
+
+ _id = get_vnf_id(tacker_client, vnf_name) if vnf_id is None else vnf_id
+
+ if _id is not None:
+ all_vnfs = list_vnfs(tacker_client, verbose=True)['vnfs']
+ return next((vnf for vnf in all_vnfs if vnf['id'] == _id), None)
+ else:
+ raise Exception('Could not retrieve ID from name [%s]' % vnf_name)
+
+ except Exception, e:
+ logger.error("Could not retrieve VNF [vnf_id=%s, vnf_name=%s] - %s"
+ % (vnf_id, vnf_name, e))
+ return None
+
+
+def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=60):
+ try:
+ vnf = get_vnf(tacker_client, vnf_id, vnf_name)
+ if vnf is None:
+ raise Exception("Could not retrieve VNF - id='%s', name='%s'"
+ % vnf_id, vnf_name)
+ logger.info('Waiting for vnf {0}'.format(str(vnf)))
+ while vnf['status'] != 'ACTIVE' and timeout >= 0:
if vnf['status'] == 'ERROR':
- raise Exception('Error when booting vnf %s' % _id)
+ raise Exception('Error when booting vnf %s' % vnf['id'])
elif vnf['status'] == 'PENDING_CREATE':
time.sleep(3)
- continue
- else:
- break
- return _id
+ timeout -= 3
+ vnf = get_vnf(tacker_client, vnf_id, vnf_name)
+
+ if (timeout < 0):
+ raise Exception('Timeout when booting vnf %s' % vnf['id'])
+
+ return vnf['id']
except Exception, e:
logger.error("error [wait_for_vnf(tacker_client, '%s', '%s')]: %s"
% (vnf_id, vnf_name, e))
@@ -194,7 +215,8 @@ def list_sfcs(tacker_client, verbose=False):
def create_sfc(tacker_client, sfc_name,
chain_vnf_ids=None,
- chain_vnf_names=None):
+ chain_vnf_names=None,
+ symmetrical=False):
try:
sfc_body = {
'sfc': {
@@ -203,6 +225,8 @@ def create_sfc(tacker_client, sfc_name,
'chain': []
}
}
+ if symmetrical:
+ sfc_body['sfc']['symmetrical'] = True
if chain_vnf_ids is not None:
sfc_body['sfc']['chain'] = chain_vnf_ids
else:
diff --git a/functest/utils/openstack_utils.py b/functest/utils/openstack_utils.py
index 3093cb55..e33af63b 100755
--- a/functest/utils/openstack_utils.py
+++ b/functest/utils/openstack_utils.py
@@ -18,6 +18,7 @@ from keystoneauth1 import loading
from keystoneauth1 import session
from cinderclient import client as cinderclient
from glanceclient import client as glanceclient
+from heatclient import client as heatclient
from novaclient import client as novaclient
from keystoneclient import client as keystoneclient
from neutronclient.neutron import client as neutronclient
@@ -28,6 +29,7 @@ import functest.utils.functest_utils as ft_utils
logger = ft_logger.Logger("openstack_utils").getLogger()
DEFAULT_API_VERSION = '2'
+DEFAULT_HEAT_API_VERSION = '1'
# *********************************************
@@ -241,6 +243,22 @@ def get_glance_client(other_creds={}):
return glanceclient.Client(get_glance_client_version(), session=sess)
+def get_heat_client_version():
+ api_version = os.getenv('OS_ORCHESTRATION_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_ORCHESTRATION_API_VERSION is set in env as '%s'",
+ api_version)
+ return api_version
+ return DEFAULT_HEAT_API_VERSION
+
+
+def get_heat_client(other_creds={}):
+ sess = get_session(other_creds)
+ return heatclient.Client(get_heat_client_version(), session=sess)
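+# Illustrative usage: get_resource(get_heat_client(), stack_id, resource)
+# returns a single stack resource (see the HEAT helpers added below).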
+
+
# *********************************************
# NOVA
# *********************************************
@@ -985,36 +1001,46 @@ def create_security_group(neutron_client, sg_name, sg_description):
def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
port_range_min=None, port_range_max=None):
- if port_range_min is None and port_range_max is None:
- json_body = {'security_group_rule': {'direction': direction,
- 'security_group_id': sg_id,
- 'protocol': protocol}}
- elif port_range_min is not None and port_range_max is not None:
- json_body = {'security_group_rule': {'direction': direction,
- 'security_group_id': sg_id,
- 'port_range_min': port_range_min,
- 'port_range_max': port_range_max,
- 'protocol': protocol}}
+    # We create a security group rule in 2 steps
+    # 1 - we check the format and set the json body accordingly
+    # 2 - we call the neutron client to create the security group rule
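+    # e.g. create_secgroup_rule(neutron_client, sg_id, 'ingress', 'tcp',
+    #      80, 80) opens TCP/80, while leaving both port bounds unset
+    #      creates a protocol-wide rule.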
+
+ # Format check
+ json_body = {'security_group_rule': {'direction': direction,
+ 'security_group_id': sg_id,
+ 'protocol': protocol}}
+    # The port range parameters may be:
+    # - both None => we add nothing to the json body
+    # - both not None => we add them to the json body
+    # One cannot be None if the other is not None.
+ if (port_range_min is not None and port_range_max is not None):
+ # add port_range in json description
+ json_body['security_group_rule']['port_range_min'] = port_range_min
+ json_body['security_group_rule']['port_range_max'] = port_range_max
+ logger.debug("Security_group format set (port range included)")
else:
- logger.error("Error [create_secgroup_rule(neutron_client, '%s', '%s', "
- "'%s', '%s', '%s', '%s')]:" % (neutron_client,
- sg_id, direction,
- port_range_min,
- port_range_max,
- protocol),
- " Invalid values for port_range_min, port_range_max")
- return False
+ # either both port range are set to None => do nothing
+ # or one is set but not the other => log it and return False
+ if port_range_min is None and port_range_max is None:
+ logger.debug("Security_group format set (no port range mentioned)")
+ else:
+ logger.error("Bad security group format."
+ "One of the port range is not properly set:"
+ "range min: {},"
+ "range max: {}".format(port_range_min,
+ port_range_max))
+ return False
+
+ # Create security group using neutron client
try:
neutron_client.create_security_group_rule(json_body)
return True
- except Exception, e:
- logger.error("Error [create_secgroup_rule(neutron_client, '%s', '%s', "
- "'%s', '%s', '%s', '%s')]: %s" % (neutron_client,
- sg_id,
- direction,
- port_range_min,
- port_range_max,
- protocol, e))
+    except Exception:
+        logger.exception("Impossible to create the security group rule;"
+                         " it probably already exists")
return False
@@ -1383,3 +1406,15 @@ def delete_user(keystone_client, user_id):
logger.error("Error [delete_user(keystone_client, '%s')]: %s"
% (user_id, e))
return False
+
+
+# *********************************************
+# HEAT
+# *********************************************
+def get_resource(heat_client, stack_id, resource):
+ try:
+ resources = heat_client.resources.get(stack_id, resource)
+ return resources
+ except Exception, e:
+ logger.error("Error [get_resource]: %s" % e)
+ return None