Diffstat (limited to 'testcases/VIM/OpenStack/CI/libraries')
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/healthcheck.sh      208
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py   560
-rw-r--r--  testcases/VIM/OpenStack/CI/libraries/run_tempest.py      347
3 files changed, 0 insertions, 1115 deletions
diff --git a/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh b/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh
deleted file mode 100755
index 611c100c5..000000000
--- a/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh
+++ /dev/null
@@ -1,208 +0,0 @@
-#
-# OpenStack Health Check
-# This script is meant for really basic API operations on OpenStack
-# Services tested: Keystone, Glance, Cinder, Neutron, Nova
-#
-#
-# Author:
-# jose.lausuch@ericsson.com
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-set -e
-
-# Redirect all output (stdout) to a log file; only errors are shown on the console.
-LOG_FILE=/home/opnfv/functest/results/healthcheck.log
-echo "">$LOG_FILE
-exec 1<>$LOG_FILE
-
-info () {
- echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healthcheck - INFO - " "$*" | tee -a $LOG_FILE 1>&2
-}
-
-debug () {
- if [[ "${CI_DEBUG,,}" == "true" ]]; then
- echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healthcheck - DEBUG - " "$*" | tee -a $LOG_FILE 1>&2
- fi
-}
-
-error () {
- echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healthcheck - ERROR - " "$*" | tee -a $LOG_FILE 1>&2
- exit 1
-}
-
-if [ -z "${OS_AUTH_URL}" ]; then
-    echo "Source the OpenStack credentials first."
- exit 1
-fi
-
-
-echo "Using following credentials:"
-env | grep OS
-
-## Variables:
-project_1="opnfv-tenant1"
-project_2="opnfv-tenant2"
-user_1="opnfv_user1"
-user_2="opnfv_user2"
-user_3="opnfv_user3"
-user_4="opnfv_user4"
-user_5="opnfv_user5"
-user_6="opnfv_user6"
-image_1="opnfv-image1"
-image_2="opnfv-image2"
-volume_1="opnfv-volume1"
-volume_2="opnfv-volume2"
-net_1="opnfv-network1"
-net_2="opnfv-network2"
-subnet_1="opnfv-subnet1"
-subnet_2="opnfv-subnet2"
-port_1="opnfv-port1"
-port_2="opnfv-port2"
-router_1="opnfv-router1"
-router_2="opnfv-router2"
-instance_1="opnfv-instance1"
-instance_2="opnfv-instance2"
-instance_3="opnfv-instance3"
-instance_4="opnfv-instance4"
-
-
-
-function wait_for_ip() {
-    # $1 is the instance name
-    # $2 is the first octet of the subnet IP (currently unused)
-    timeout=60
-    while [[ ${timeout} -gt 0 ]]; do
-        if [[ $(nova console-log $1 | grep "No lease, failing") ]]; then
-            error "The instance $1 couldn't get an IP from the DHCP agent."
-        elif [[ $(nova console-log $1 | grep "^Lease" | grep "obtained") ]]; then
-            debug "The instance $1 got an IP successfully from the DHCP agent."
-            break
-        fi
-        let timeout=timeout-1
-        sleep 1
-    done
-}
-
-
-#################################
-info "Testing Keystone API..." | tee -a $LOG_FILE 1>&2
-#################################
-openstack project create ${project_1}
-debug "project '${project_1}' created."
-openstack project create ${project_2}
-debug "project '${project_2}' created."
-openstack user create ${user_1} --project ${project_1}
-debug "user '${user_1}' created in project ${project_1}."
-openstack user create ${user_2} --project ${project_1}
-debug "user '${user_2}' created in project ${project_1}."
-openstack user create ${user_3} --project ${project_1}
-debug "user '${user_3}' created in project ${project_1}."
-openstack user create ${user_4} --project ${project_2}
-debug "user '${user_4}' created in project ${project_2}."
-openstack user create ${user_5} --project ${project_2}
-debug "user '${user_5}' created in project ${project_2}."
-openstack user create ${user_6} --project ${project_2}
-debug "user '${user_6}' created in project ${project_2}."
-info "...Keystone OK!"
-
-#################################
-info "Testing Glance API..."
-#################################
-image=/home/opnfv/functest/data/cirros-0.3.4-x86_64-disk.img
-glance image-create --name ${image_1} --disk-format qcow2 --container-format bare < ${image}
-debug "image '${image_1}' created."
-glance image-create --name ${image_2} --disk-format qcow2 --container-format bare < ${image}
-debug "image '${image_2}' created."
-info "... Glance OK!"
-
-#################################
-info "Testing Cinder API..."
-#################################
-cinder create --display_name ${volume_1} 1
-debug "volume '${volume_1}' created."
-cinder create --display_name ${volume_2} 10
-debug "volume '${volume_2}' created."
-info "...Cinder OK!"
-
-#################################
-info "Testing Neutron API..."
-#################################
-
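-# Find the external network (router:external=True): its ID is needed to set the router gateways below.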
-network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
-for id in ${network_ids[@]}; do
- [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && ext_net_id=${id}
-done
-if [[ "${ext_net_id}" == "" ]]; then
- error "No external network found. Exiting Health Check..."
- exit 1
-else
- info "External network found. ${ext_net_id}"
-fi
-
-info "1. Create Networks..."
-neutron net-create ${net_1}
-debug "net '${net_1}' created."
-neutron net-create ${net_2}
-debug "net '${net_2}' created."
-net1_id=$(neutron net-list | grep ${net_1} | awk '{print $2}')
-net2_id=$(neutron net-list | grep ${net_2} | awk '{print $2}')
-
-info "2. Create subnets..."
-neutron subnet-create --name ${subnet_1} --allocation-pool start=10.6.0.2,end=10.6.0.253 --gateway 10.6.0.254 ${net_1} 10.6.0.0/24
-debug "subnet '${subnet_1}' created."
-neutron subnet-create --name ${subnet_2} --allocation-pool start=10.7.0.2,end=10.7.0.253 --gateway 10.7.0.254 ${net_2} 10.7.0.0/24
-debug "subnet '${subnet_2}' created."
-
-info "4. Create Routers..."
-neutron router-create ${router_1}
-debug "router '${router_1}' created."
-neutron router-create ${router_2}
-debug "router '${router_2}' created."
-
-neutron router-gateway-set ${router_1} ${ext_net_id}
-debug "router '${router_1}' gateway set to ${ext_net_id}."
-neutron router-gateway-set ${router_2} ${ext_net_id}
-debug "router '${router_2}' gateway set to ${ext_net_id}."
-
-neutron router-interface-add ${router_1} ${subnet_1}
-debug "router '${router_1}': interface added for subnet '${subnet_1}'."
-neutron router-interface-add ${router_2} ${subnet_2}
-debug "router '${router_2}': interface added for subnet '${subnet_2}'."
-
-info "...Neutron OK!"
-
-#################################
-info "Testing Nova API..."
-#################################
-
-nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_1}
-debug "nova instance '${instance_1}' booted on ${net_1}."
-nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_2}
-debug "nova instance '${instance_2}' booted on ${net_1}."
-nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_3}
-debug "nova instance '${instance_3}' booted on ${net_2}."
-nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_4}
-debug "nova instance '${instance_4}' booted on ${net_2}."
-
-vm1_id=$(nova list | grep ${instance_1} | awk '{print $2}')
-vm2_id=$(nova list | grep ${instance_2} | awk '{print $2}')
-vm3_id=$(nova list | grep ${instance_3} | awk '{print $2}')
-vm4_id=$(nova list | grep ${instance_4} | awk '{print $2}')
-info "...Nova OK!"
-
-info "Checking if instances get an IP from DHCP..."
-wait_for_ip ${instance_1} "10.6"
-wait_for_ip ${instance_2} "10.6"
-wait_for_ip ${instance_3} "10.7"
-wait_for_ip ${instance_4} "10.7"
-info "...DHCP OK!"
-
-info "Health check passed!"
-exit 0
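
Note: the DHCP check above passes or fails by polling "nova console-log" for the
CirrOS lease messages. A minimal Python sketch of the same polling loop, assuming
only that the nova CLI is on PATH and credentials are sourced (the helper name
and messages are illustrative, not part of the deleted script):

    import subprocess
    import time

    def wait_for_ip(instance_name, timeout=60):
        # Poll the console log until a lease is obtained or refused.
        while timeout > 0:
            log = subprocess.check_output(
                ['nova', 'console-log', instance_name]).decode()
            if 'No lease, failing' in log:
                raise RuntimeError(
                    '%s got no IP from the DHCP agent' % instance_name)
            if 'Lease' in log and 'obtained' in log:
                return True  # the instance got an IP
            timeout -= 1
            time.sleep(1)
        return False  # timed out without seeing any lease message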
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
deleted file mode 100755
index 4dc1e16d5..000000000
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
+++ /dev/null
@@ -1,560 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2015 Orange
-# guyrodrigue.koffi@orange.com
-# morgan.richomme@orange.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# 0.1 (05/2015) initial commit
-# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
-# 0.3 (19/10/2015) remove Tempest from run_rally
-# and push result into test DB
-#
-import argparse
-import iniparse
-import json
-import os
-import re
-import requests
-import subprocess
-import time
-import yaml
-
-from novaclient import client as novaclient
-from glanceclient import client as glanceclient
-from keystoneclient.v2_0 import client as keystoneclient
-from neutronclient.v2_0 import client as neutronclient
-from cinderclient import client as cinderclient
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
-import functest.utils.openstack_utils as openstack_utils
-
-""" tests configuration """
-tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
- 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
-parser = argparse.ArgumentParser()
-parser.add_argument("test_name",
- help="Module name to be tested. "
- "Possible values are : "
- "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
- "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
- "{d[10]} ] "
- "The 'all' value "
- "performs all possible test scenarios"
- .format(d=tests))
-
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-parser.add_argument("-s", "--smoke",
- help="Smoke test mode",
- action="store_true")
-parser.add_argument("-v", "--verbose",
- help="Print verbose info about the progress",
- action="store_true")
-parser.add_argument("-n", "--noclean",
- help="Don't clean the created resources for this test.",
- action="store_true")
-parser.add_argument("-z", "--sanity",
- help="Sanity test mode, execute only a subset of tests",
- action="store_true")
-
-args = parser.parse_args()
-
-client_dict = {}
-network_dict = {}
-
-if args.verbose:
- RALLY_STDERR = subprocess.STDOUT
-else:
- RALLY_STDERR = open(os.devnull, 'w')
-
-""" logging configuration """
-logger = ft_logger.Logger("run_rally").getLogger()
-
-REPO_PATH = os.environ['repos_dir'] + '/functest/'
-if not os.path.exists(REPO_PATH):
- logger.error("Functest repository directory not found '%s'" % REPO_PATH)
- exit(-1)
-
-
-with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
-    functest_yaml = yaml.safe_load(f)
-
-HOME = os.environ['HOME'] + "/"
-SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general").get(
- "directories").get("dir_rally_scn")
-TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
-SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
-
-FLAVOR_NAME = "m1.tiny"
-USERS_AMOUNT = 2
-TENANTS_AMOUNT = 3
-ITERATIONS_AMOUNT = 10
-CONCURRENCY = 4
-
-RESULTS_DIR = functest_yaml.get("general").get("directories").get(
- "dir_rally_res")
-TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
- "dir_results") + '/tempest/tempest.conf'
-TEST_DB = functest_yaml.get("results").get("test_db_url")
-
-PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
-PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
-PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
-ROUTER_NAME = functest_yaml.get("rally").get("router_name")
-
-GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
- "image_name")
-GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
- "image_file_name")
-GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
- "image_disk_format")
-GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
- "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
-
-CINDER_VOLUME_TYPE_NAME = "volume_test"
-
-
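-# Per-scenario summary dicts, appended by get_output() and rendered into the final report by main().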
-SUMMARY = []
-
-
-def push_results_to_db(case, payload, criteria):
-
- url = TEST_DB + "/results"
- installer = functest_utils.get_installer_type(logger)
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
- pod_name = functest_utils.get_pod_name(logger)
-
- # evaluate success criteria
-
- params = {"project_name": "functest", "case_name": case,
- "pod_name": pod_name, "installer": installer,
- "version": version, "scenario": scenario,
- "criteria": criteria, "details": payload}
-
- headers = {'Content-Type': 'application/json'}
- r = requests.post(url, data=json.dumps(params), headers=headers)
- logger.debug(r)
-
-
-def get_task_id(cmd_raw):
- """
- get task id from command rally result
- :param cmd_raw:
- :return: task_id as string
- """
- taskid_re = re.compile('^Task +(.*): started$')
- for line in cmd_raw.splitlines(True):
- line = line.strip()
- match = taskid_re.match(line)
- if match:
- return match.group(1)
- return None
-
-
-def task_succeed(json_raw):
- """
- Parse JSON from rally JSON results
- :param json_raw:
- :return: Bool
- """
- rally_report = json.loads(json_raw)
- for report in rally_report:
- if report is None or report.get('result') is None:
- return False
-
- for result in report.get('result'):
- if result is None or len(result.get('error')) > 0:
- return False
-
- return True
-
-
-def live_migration_supported():
- config = iniparse.ConfigParser()
- if (config.read(TEMPEST_CONF_FILE) and
- config.has_section('compute-feature-enabled') and
- config.has_option('compute-feature-enabled', 'live_migration')):
- return config.getboolean('compute-feature-enabled', 'live_migration')
-
- return False
-
-
-def build_task_args(test_file_name):
- task_args = {'service_list': [test_file_name]}
- task_args['image_name'] = GLANCE_IMAGE_NAME
- task_args['flavor_name'] = FLAVOR_NAME
- task_args['glance_image_location'] = GLANCE_IMAGE_PATH
- task_args['tmpl_dir'] = TEMPLATE_DIR
- task_args['sup_dir'] = SUPPORT_DIR
- task_args['users_amount'] = USERS_AMOUNT
- task_args['tenants_amount'] = TENANTS_AMOUNT
- task_args['iterations'] = ITERATIONS_AMOUNT
- task_args['concurrency'] = CONCURRENCY
-
- if args.sanity:
- task_args['full_mode'] = False
- task_args['smoke'] = True
- else:
- task_args['full_mode'] = True
- task_args['smoke'] = args.smoke
-
- ext_net = openstack_utils.get_external_net(client_dict['neutron'])
- if ext_net:
- task_args['floating_network'] = str(ext_net)
- else:
- task_args['floating_network'] = ''
-
- net_id = network_dict['net_id']
- task_args['netid'] = str(net_id)
- task_args['live_migration'] = live_migration_supported()
-
- return task_args
-
-
-def get_output(proc, test_name):
- global SUMMARY
- result = ""
- nb_tests = 0
- overall_duration = 0.0
- success = 0.0
- nb_totals = 0
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- if args.verbose:
- result += line
- else:
- if ("Load duration" in line or
- "started" in line or
- "finished" in line or
- " Preparing" in line or
- "+-" in line or
- "|" in line):
- result += line
- elif "test scenario" in line:
- result += "\n" + line
- elif "Full duration" in line:
- result += line + "\n\n"
-
-        # parse output for summary report
-        if ("| " in line and
-                "| action" not in line and
-                "| Starting" not in line and
-                "| Completed" not in line and
-                "| ITER" not in line and
-                "|   " not in line and
-                "| total" not in line):
-            nb_tests += 1
-        elif "| total" in line:
-            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
-            try:
-                success += float(percentage)
-            except ValueError:
-                logger.info('Percentage error: %s, %s' % (percentage, line))
-            nb_totals += 1
-        elif "Full duration" in line:
-            duration = line.split(': ')[1]
-            try:
-                overall_duration += float(duration)
-            except ValueError:
-                logger.info('Duration error: %s, %s' % (duration, line))
-
- overall_duration = "{:10.2f}".format(overall_duration)
- if nb_totals == 0:
- success_avg = 0
- else:
- success_avg = "{:0.2f}".format(success / nb_totals)
-
- scenario_summary = {'test_name': test_name,
- 'overall_duration': overall_duration,
- 'nb_tests': nb_tests,
- 'success': success_avg}
- SUMMARY.append(scenario_summary)
-
- logger.info("\n" + result)
-
- return result
-
-
-def get_cmd_output(proc):
- result = ""
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- result += line
-
- return result
-
-
-def run_task(test_name):
-    """
-    The "main" function of the script: launches Rally for the given
-    test scenario.
-    :param test_name: name of the Rally test scenario
-    :return: void
-    """
- global SUMMARY
- logger.info('Starting test scenario "{}" ...'.format(test_name))
-
- task_file = '{}task.yaml'.format(SCENARIOS_DIR)
- if not os.path.exists(task_file):
- logger.error("Task file '%s' does not exist." % task_file)
- exit(-1)
-
- test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
- test_name)
- if not os.path.exists(test_file_name):
- logger.error("The scenario '%s' does not exist." % test_file_name)
- exit(-1)
-
- logger.debug('Scenario fetched from : {}'.format(test_file_name))
-
- cmd_line = ("rally task start --abort-on-sla-failure " +
- "--task {} ".format(task_file) +
- "--task-args \"{}\" ".format(build_task_args(test_name)))
- logger.debug('running command line : {}'.format(cmd_line))
-
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=RALLY_STDERR, shell=True)
- output = get_output(p, test_name)
- task_id = get_task_id(output)
- logger.debug('task_id : {}'.format(task_id))
-
- if task_id is None:
- logger.error('Failed to retrieve task_id, validating task...')
- cmd_line = ("rally task validate " +
- "--task {} ".format(task_file) +
- "--task-args \"{}\" ".format(build_task_args(test_name)))
- logger.debug('running command line : {}'.format(cmd_line))
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, shell=True)
- output = get_cmd_output(p)
- logger.error("Task validation result:" + "\n" + output)
- return
-
- # check for result directory and create it otherwise
- if not os.path.exists(RESULTS_DIR):
- logger.debug('{} does not exist, creating it.'.format(RESULTS_DIR))
- os.makedirs(RESULTS_DIR)
-
- # write html report file
- report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
- cmd_line = "rally task report {} --out {}".format(task_id,
- report_file_name)
-
- logger.debug('running command line : {}'.format(cmd_line))
- os.popen(cmd_line)
-
- # get and save rally operation JSON result
- cmd_line = "rally task results %s" % task_id
- logger.debug('running command line : {}'.format(cmd_line))
- cmd = os.popen(cmd_line)
- json_results = cmd.read()
- with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
- logger.debug('saving json file')
- f.write(json_results)
-
- with open('{}opnfv-{}.json'
- .format(RESULTS_DIR, test_name)) as json_file:
- json_data = json.load(json_file)
-
- """ parse JSON operation result """
- status = "failed"
- if task_succeed(json_results):
- logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
- status = "passed"
- else:
- logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
-
- # Push results in payload of testcase
- if args.report:
- logger.debug("Push result into DB")
- push_results_to_db("Rally_details", json_data, status)
-
-
-def main():
- global SUMMARY
- global network_dict
- # configure script
-    if args.test_name not in tests:
-        logger.error("Invalid test name '%s'. Possible values: %s"
-                     % (args.test_name, tests))
- exit(-1)
-
- SUMMARY = []
- creds_nova = openstack_utils.get_credentials("nova")
- nova_client = novaclient.Client('2', **creds_nova)
- creds_neutron = openstack_utils.get_credentials("neutron")
- neutron_client = neutronclient.Client(**creds_neutron)
- creds_keystone = openstack_utils.get_credentials("keystone")
- keystone_client = keystoneclient.Client(**creds_keystone)
- glance_endpoint = keystone_client.service_catalog.url_for(
- service_type='image', endpoint_type='publicURL')
- glance_client = glanceclient.Client(1, glance_endpoint,
- token=keystone_client.auth_token)
- creds_cinder = openstack_utils.get_credentials("cinder")
- cinder_client = cinderclient.Client('2', creds_cinder['username'],
- creds_cinder['api_key'],
- creds_cinder['project_id'],
- creds_cinder['auth_url'],
- service_type="volume")
-
- client_dict['neutron'] = neutron_client
-
- volume_types = openstack_utils.list_volume_types(cinder_client,
- private=False)
- if not volume_types:
- volume_type = openstack_utils.create_volume_type(
- cinder_client, CINDER_VOLUME_TYPE_NAME)
- if not volume_type:
- logger.error("Failed to create volume type...")
- exit(-1)
- else:
- logger.debug("Volume type '%s' created succesfully..."
- % CINDER_VOLUME_TYPE_NAME)
- else:
- logger.debug("Using existing volume type(s)...")
-
- image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
- image_exists = False
-
- if image_id == '':
- logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
- GLANCE_IMAGE_PATH))
- image_id = openstack_utils.create_glance_image(glance_client,
- GLANCE_IMAGE_NAME,
- GLANCE_IMAGE_PATH)
- if not image_id:
- logger.error("Failed to create the Glance image...")
- exit(-1)
- else:
- logger.debug("Image '%s' with ID '%s' created succesfully ."
- % (GLANCE_IMAGE_NAME, image_id))
- else:
- logger.debug("Using existing image '%s' with ID '%s'..."
- % (GLANCE_IMAGE_NAME, image_id))
- image_exists = True
-
- logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
- network_dict = openstack_utils.create_network_full(logger,
- client_dict['neutron'],
- PRIVATE_NET_NAME,
- PRIVATE_SUBNET_NAME,
- ROUTER_NAME,
- PRIVATE_SUBNET_CIDR)
- if not network_dict:
- logger.error("Failed to create network...")
- exit(-1)
- else:
- if not openstack_utils.update_neutron_net(client_dict['neutron'],
- network_dict['net_id'],
- shared=True):
- logger.error("Failed to update network...")
- exit(-1)
- else:
- logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)
-
- if args.test_name == "all":
- for test_name in tests:
-            if test_name not in ('all', 'vm'):
-                run_task(test_name)
- else:
- logger.debug("Test name: " + args.test_name)
- run_task(args.test_name)
-
- report = ("\n"
- " "
- "\n"
- " Rally Summary Report\n"
- "\n"
- "+===================+============+===============+===========+"
- "\n"
- "| Module | Duration | nb. Test Run | Success |"
- "\n"
- "+===================+============+===============+===========+"
- "\n")
- payload = []
-
- # for each scenario we draw a row for the table
- total_duration = 0.0
- total_nb_tests = 0
- total_success = 0.0
- for s in SUMMARY:
- name = "{0:<17}".format(s['test_name'])
- duration = float(s['overall_duration'])
- total_duration += duration
- duration = time.strftime("%M:%S", time.gmtime(duration))
- duration = "{0:<10}".format(duration)
- nb_tests = "{0:<13}".format(s['nb_tests'])
- total_nb_tests += int(s['nb_tests'])
- success = "{0:<10}".format(str(s['success']) + '%')
- total_success += float(s['success'])
- report += ("" +
- "| " + name + " | " + duration + " | " +
- nb_tests + " | " + success + "|\n" +
- "+-------------------+------------"
- "+---------------+-----------+\n")
- payload.append({'module': name,
- 'details': {'duration': s['overall_duration'],
- 'nb tests': s['nb_tests'],
- 'success': s['success']}})
-
- total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
- total_duration_str2 = "{0:<10}".format(total_duration_str)
- total_nb_tests_str = "{0:<13}".format(total_nb_tests)
- total_success = "{:0.2f}".format(total_success / len(SUMMARY))
- total_success_str = "{0:<10}".format(str(total_success) + '%')
- report += "+===================+============+===============+===========+"
- report += "\n"
- report += ("| TOTAL: | " + total_duration_str2 + " | " +
- total_nb_tests_str + " | " + total_success_str + "|\n")
- report += "+===================+============+===============+===========+"
- report += "\n"
-
- logger.info("\n" + report)
- payload.append({'summary': {'duration': total_duration,
- 'nb tests': total_nb_tests,
- 'nb success': total_success}})
-
- # Generate json results for DB
- # json_results = {"timestart": time_start, "duration": total_duration,
- # "tests": int(total_nb_tests),
- # "success": int(total_success)}
- # logger.info("Results: "+str(json_results))
-
- # Evaluation of the success criteria
- status = "failed"
- # for Rally the overall success rate must be at least 90% to pass
- if float(total_success) >= 90:
- status = "passed"
-
- if args.report:
- logger.debug("Pushing Rally summary into DB...")
- push_results_to_db("Rally", payload, status)
-
- if args.noclean:
- exit(0)
-
- if not image_exists:
- logger.debug("Deleting image '%s' with ID '%s'..."
- % (GLANCE_IMAGE_NAME, image_id))
- if not openstack_utils.delete_glance_image(nova_client, image_id):
- logger.error("Error deleting the glance image")
-
- if not volume_types:
- logger.debug("Deleting volume type '%s'..."
- % CINDER_VOLUME_TYPE_NAME)
- if not openstack_utils.delete_volume_type(cinder_client, volume_type):
- logger.error("Error in deleting volume type...")
-
-
-if __name__ == '__main__':
- main()
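
Note: the pass/fail verdict of run_task() hinges on task_succeed() above: a Rally
task passes only if every scenario report is present and every iteration carries
an empty 'error' list. A self-contained illustration of that rule on toy input
(the JSON is reduced to the fields the parser actually reads):

    import json

    def task_succeed(json_raw):
        # A task passes only if no scenario report is missing and no
        # iteration recorded an error.
        rally_report = json.loads(json_raw)
        for report in rally_report:
            if report is None or report.get('result') is None:
                return False
            for result in report.get('result'):
                if result is None or len(result.get('error')) > 0:
                    return False
        return True

    ok = json.dumps([{'result': [{'error': []}, {'error': []}]}])
    ko = json.dumps([{'result': [{'error': ['timeout']}]}])
    assert task_succeed(ok) is True
    assert task_succeed(ko) is False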
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
deleted file mode 100644
index bf62ce306..000000000
--- a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
+++ /dev/null
@@ -1,347 +0,0 @@
-#!/usr/bin/env python
-#
-# Description:
-# Runs tempest and pushes the results to the DB
-#
-# Authors:
-# morgan.richomme@orange.com
-# jose.lausuch@ericsson.com
-# viktor.tikkanen@nokia.com
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import argparse
-import json
-import os
-import re
-import requests
-import shutil
-import subprocess
-import time
-import yaml
-import ConfigParser
-
-import keystoneclient.v2_0.client as ksclient
-from neutronclient.v2_0 import client as neutronclient
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-
-modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing',
- 'identity', 'image', 'network', 'object_storage', 'orchestration',
- 'telemetry', 'volume', 'custom', 'defcore']
-
-""" tests configuration """
-parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug",
- help="Debug mode",
- action="store_true")
-parser.add_argument("-s", "--serial",
- help="Run tests in one thread",
- action="store_true")
-parser.add_argument("-m", "--mode",
- help="Tempest test mode [smoke, all]",
- default="smoke")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-parser.add_argument("-n", "--noclean",
- help="Don't clean the created resources for this test.",
- action="store_true")
-
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("run_tempest").getLogger()
-
-REPO_PATH = os.environ['repos_dir'] + '/functest/'
-
-
-with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
-    functest_yaml = yaml.safe_load(f)
-TEST_DB = functest_yaml.get("results").get("test_db_url")
-
-MODE = "smoke"
-PRIVATE_NET_NAME = functest_yaml.get("tempest").get("private_net_name")
-PRIVATE_SUBNET_NAME = functest_yaml.get("tempest").get("private_subnet_name")
-PRIVATE_SUBNET_CIDR = functest_yaml.get("tempest").get("private_subnet_cidr")
-ROUTER_NAME = functest_yaml.get("tempest").get("router_name")
-TENANT_NAME = functest_yaml.get("tempest").get("identity").get("tenant_name")
-TENANT_DESCRIPTION = functest_yaml.get("tempest").get("identity").get(
- "tenant_description")
-USER_NAME = functest_yaml.get("tempest").get("identity").get("user_name")
-USER_PASSWORD = functest_yaml.get("tempest").get("identity").get(
- "user_password")
-DEPLOYMENT_NAME = functest_yaml.get("rally").get("deployment_name")
-RALLY_INSTALLATION_DIR = functest_yaml.get("general").get("directories").get(
- "dir_rally_inst")
-RESULTS_DIR = functest_yaml.get("general").get("directories").get(
- "dir_results")
-TEMPEST_RESULTS_DIR = RESULTS_DIR + '/tempest'
-TEST_LIST_DIR = functest_yaml.get("general").get("directories").get(
- "dir_tempest_cases")
-TEMPEST_CUSTOM = REPO_PATH + TEST_LIST_DIR + 'test_list.txt'
-TEMPEST_BLACKLIST = REPO_PATH + TEST_LIST_DIR + 'blacklist.txt'
-TEMPEST_DEFCORE = REPO_PATH + TEST_LIST_DIR + 'defcore_req.txt'
-TEMPEST_RAW_LIST = TEMPEST_RESULTS_DIR + '/test_raw_list.txt'
-TEMPEST_LIST = TEMPEST_RESULTS_DIR + '/test_list.txt'
-
-
-def get_info(file_result):
-    test_run = ""
-    duration = ""
-    test_failed = ""
-
-    p = subprocess.Popen('cat ' + file_result,
-                         shell=True, stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT)
-    for line in p.stdout.readlines():
-        if (len(test_run) < 1):
-            test_run = re.findall("[0-9]*\ tests", line)
-        if (len(duration) < 1):
-            duration = re.findall("[0-9]*\.[0-9]*s", line)
-        regexp = r"(failures=[0-9]+)"
-        if (len(test_failed) < 1):
-            test_failed = re.findall(regexp, line)
-
-    logger.debug("test_run:" + str(test_run))
-    logger.debug("duration:" + str(duration))
-
-
-def push_results_to_db(case, payload, criteria):
-
- # TODO move DB creds into config file
- url = TEST_DB + "/results"
- installer = ft_utils.get_installer_type(logger)
- scenario = ft_utils.get_scenario(logger)
- version = ft_utils.get_version(logger)
- pod_name = ft_utils.get_pod_name(logger)
-
- logger.info("Pushing results to DB: '%s'." % url)
-
- params = {"project_name": "functest", "case_name": case,
- "pod_name": str(pod_name), 'installer': installer,
- "version": version, "scenario": scenario, "criteria": criteria,
- 'details': payload}
- headers = {'Content-Type': 'application/json'}
-
- r = requests.post(url, data=json.dumps(params), headers=headers)
- logger.debug(r)
-
-
-def create_tempest_resources():
- ks_creds = os_utils.get_credentials("keystone")
- logger.debug("Creating tenant and user for Tempest suite")
- keystone = ksclient.Client(**ks_creds)
- tenant_id = os_utils.create_tenant(keystone,
- TENANT_NAME,
- TENANT_DESCRIPTION)
- if tenant_id == '':
- logger.error("Error : Failed to create %s tenant" % TENANT_NAME)
-
- user_id = os_utils.create_user(keystone, USER_NAME, USER_PASSWORD,
- None, tenant_id)
- if user_id == '':
- logger.error("Error : Failed to create %s user" % USER_NAME)
-
- logger.debug("Creating private network for Tempest suite")
- creds_neutron = os_utils.get_credentials("neutron")
- neutron_client = neutronclient.Client(**creds_neutron)
- network_dic = os_utils.create_network_full(logger,
- neutron_client,
- PRIVATE_NET_NAME,
- PRIVATE_SUBNET_NAME,
- ROUTER_NAME,
- PRIVATE_SUBNET_CIDR)
- if network_dic:
- if not os_utils.update_neutron_net(neutron_client,
- network_dic['net_id'],
- shared=True):
- logger.error("Failed to update private network...")
- exit(-1)
- else:
- logger.debug("Network '%s' is available..." % PRIVATE_NET_NAME)
- else:
- logger.error("Private network creation failed")
- exit(-1)
-
-
-def configure_tempest(deployment_dir):
- """
- Add/update needed parameters into tempest.conf file generated by Rally
- """
-
- logger.debug("Generating tempest.conf file...")
- cmd = "rally verify genconfig"
- ft_utils.execute_command(cmd, logger)
-
- logger.debug("Finding tempest.conf file...")
- tempest_conf_file = deployment_dir + "/tempest.conf"
- if not os.path.isfile(tempest_conf_file):
- logger.error("Tempest configuration file %s NOT found."
- % tempest_conf_file)
- exit(-1)
-
- logger.debug("Updating selected tempest.conf parameters...")
- config = ConfigParser.RawConfigParser()
- config.read(tempest_conf_file)
- config.set('compute', 'fixed_network_name', PRIVATE_NET_NAME)
- config.set('identity', 'tenant_name', TENANT_NAME)
- config.set('identity', 'username', USER_NAME)
- config.set('identity', 'password', USER_PASSWORD)
- with open(tempest_conf_file, 'wb') as config_file:
- config.write(config_file)
-
- # Copy tempest.conf to /home/opnfv/functest/results/tempest/
- shutil.copyfile(tempest_conf_file, TEMPEST_RESULTS_DIR + '/tempest.conf')
- return True
-
-
-def read_file(filename):
- with open(filename) as src:
- return [line.strip() for line in src.readlines()]
-
-
-def generate_test_list(deployment_dir, mode):
- logger.debug("Generating test case list...")
- if mode == 'defcore':
- shutil.copyfile(TEMPEST_DEFCORE, TEMPEST_RAW_LIST)
- elif mode == 'custom':
- if os.path.isfile(TEMPEST_CUSTOM):
- shutil.copyfile(TEMPEST_CUSTOM, TEMPEST_RAW_LIST)
- else:
- logger.error("Tempest test list file %s NOT found."
- % TEMPEST_CUSTOM)
- exit(-1)
- else:
- if mode == 'smoke':
- testr_mode = "smoke"
- elif mode == 'full':
- testr_mode = ""
- else:
- testr_mode = 'tempest.api.' + mode
- cmd = ("cd " + deployment_dir + ";" + "testr list-tests " +
- testr_mode + ">" + TEMPEST_RAW_LIST + ";cd")
- ft_utils.execute_command(cmd, logger)
-
-
-def apply_tempest_blacklist():
- logger.debug("Applying tempest blacklist...")
- cases_file = read_file(TEMPEST_RAW_LIST)
- result_file = open(TEMPEST_LIST, 'w')
-    try:
-        black_file = read_file(TEMPEST_BLACKLIST)
-    except IOError:
-        black_file = []
-        logger.debug("Tempest blacklist file does not exist.")
- for line in cases_file:
- if line not in black_file:
- result_file.write(str(line) + '\n')
- result_file.close()
-
-
-def run_tempest(OPTION):
-    """
-    The "main" function of the script: launches Rally to run Tempest.
-    :param OPTION: Tempest option (smoke, custom, ...)
-    :return: void
-    """
- logger.info("Starting Tempest test suite: '%s'." % OPTION)
- cmd_line = "rally verify start " + OPTION + " --system-wide"
- CI_DEBUG = os.environ.get("CI_DEBUG")
- if CI_DEBUG == "true" or CI_DEBUG == "True":
- ft_utils.execute_command(cmd_line, logger, exit_on_error=True)
- else:
- header = ("Tempest environment:\n"
- " Installer: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
- (os.getenv('INSTALLER_TYPE', 'Unknown'),
- os.getenv('DEPLOY_SCENARIO', 'Unknown'),
- os.getenv('NODE_NAME', 'Unknown'),
- time.strftime("%a %b %d %H:%M:%S %Z %Y")))
-
- f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+')
- f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+')
- f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+')
- f_env.write(header)
-
- subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr)
-
- f_stdout.close()
- f_stderr.close()
- f_env.close()
-
- cmd_line = "rally verify show"
- ft_utils.execute_command(cmd_line, logger,
- exit_on_error=True, info=True)
-
- cmd_line = "rally verify list"
- logger.debug('Executing command : {}'.format(cmd_line))
- cmd = os.popen(cmd_line)
- output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|")
- # Format:
- # | UUID | Deployment UUID | smoke | tests | failures | Created at |
- # Duration | Status |
- num_tests = output[4]
- num_failures = output[5]
- time_start = output[6]
- duration = output[7]
- # Compute duration (let's assume it does not take more than 60 min)
- dur_min = int(duration.split(':')[1])
- dur_sec_float = float(duration.split(':')[2])
- dur_sec_int = int(round(dur_sec_float, 0))
- dur_sec_int = dur_sec_int + 60 * dur_min
-
- # Generate json results for DB
- json_results = {"timestart": time_start, "duration": dur_sec_int,
- "tests": int(num_tests), "failures": int(num_failures)}
- logger.info("Results: " + str(json_results))
-
- status = "failed"
-    try:
-        diff = (int(num_tests) - int(num_failures))
-        success_rate = 100 * diff / int(num_tests)
-    except (ValueError, ZeroDivisionError):
-        success_rate = 0
-
- # For Tempest the success criterion is a success rate of at least 90%
- if success_rate >= 90:
- status = "passed"
-
- # Push results in payload of testcase
- if args.report:
- logger.debug("Push result into DB")
- push_results_to_db("Tempest", json_results, status)
-
-
-def main():
- global MODE
-
- if args.mode not in modes:
- logger.error("Tempest mode not valid. "
- "Possible values are:\n" + str(modes))
- exit(-1)
-
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
-
- deployment_dir = ft_utils.get_deployment_dir(logger)
- configure_tempest(deployment_dir)
- create_tempest_resources()
- generate_test_list(deployment_dir, args.mode)
- apply_tempest_blacklist()
-
- MODE = "--tests-file " + TEMPEST_LIST
- if args.serial:
- MODE += " --concur 1"
-
- run_tempest(MODE)
-
-
-if __name__ == '__main__':
- main()
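
Note: run_tempest() builds its DB payload by scraping the last row of "rally
verify list". The duration and success-rate arithmetic, isolated on a made-up
row (column layout as in the comment inside run_tempest(); the values are
illustrative only):

    # Same parsing as run_tempest(): drop spaces, split on '|', then fold
    # H:MM:SS into seconds (a run is assumed to stay under an hour).
    row = ("| uuid | depl-uuid | smoke | 112 | 5 "
           "| 2016-01-01 10:00:00 | 0:14:37.12 | finished |")
    cols = row.replace(" ", "").split("|")
    num_tests, num_failures = int(cols[4]), int(cols[5])
    duration = cols[7]
    dur_min = int(duration.split(':')[1])
    dur_sec = int(round(float(duration.split(':')[2]))) + 60 * dur_min
    success_rate = 100 * (num_tests - num_failures) // num_tests
    print(dur_sec, success_rate)  # 877 95 -> "passed" at the >= 90% bar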