-rwxr-xr-x  jjb/3rd_party_ci/download-netvirt-artifact.sh                    16
-rw-r--r--  jjb/3rd_party_ci/odl-netvirt.yml                                  7
-rwxr-xr-x  jjb/dovetail/dovetail-run.sh                                      5
-rwxr-xr-x  jjb/securedlab/check-jinja2.sh                                    2
-rwxr-xr-x  jjb/xci/bifrost-provision.sh                                      4
-rwxr-xr-x  jjb/xci/bifrost-verify.sh                                         4
-rw-r--r--  prototypes/bifrost/playbooks/opnfv-virtual.yaml                   7
-rw-r--r--  prototypes/xci/file/install-ansible.sh                          136
-rwxr-xr-x  prototypes/xci/xci-deploy.sh                                      2
-rw-r--r--  utils/create_pod_file.py                                          2
-rwxr-xr-x  utils/test/reporting/functest/reporting-status.py               407
-rw-r--r--  utils/test/reporting/functest/template/index-status-tmpl.html    22
-rw-r--r--  utils/test/reporting/reporting.yaml                               7
-rw-r--r--  utils/test/reporting/utils/reporting_utils.py                    39
14 files changed, 432 insertions(+), 228 deletions(-)
diff --git a/jjb/3rd_party_ci/download-netvirt-artifact.sh b/jjb/3rd_party_ci/download-netvirt-artifact.sh
index 70e307028..7ecf8d78d 100755
--- a/jjb/3rd_party_ci/download-netvirt-artifact.sh
+++ b/jjb/3rd_party_ci/download-netvirt-artifact.sh
@@ -6,11 +6,17 @@ set -o pipefail
ODL_ZIP=distribution-karaf-0.6.0-SNAPSHOT.zip
echo "Attempting to fetch the artifact location from ODL Jenkins"
-CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~master~$GERRIT_CHANGE_ID/detail"
+if [ "$ODL_BRANCH" != 'master' ]; then
+ DIST=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\2#p')
+ ODL_BRANCH=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\1%2F\2#p')
+else
+ DIST='nitrogen'
+fi
+CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~${ODL_BRANCH}~${GERRIT_CHANGE_ID}/detail"
# due to limitation with the Jenkins Gerrit Trigger, we need to use Gerrit REST API to get the change details
-ODL_BUILD_JOB_NUM=$(curl -s $CHANGE_DETAILS_URL | grep -Eo 'netvirt-distribution-check-nitrogen/[0-9]+' | tail -1 | grep -Eo [0-9]+)
-DISTRO_CHECK_CONSOLE_LOG="https://logs.opendaylight.org/releng/jenkins092/netvirt-distribution-check-nitrogen/${ODL_BUILD_JOB_NUM}/console.log.gz"
-NETVIRT_ARTIFACT_URL=$(curl -s --compressed $DISTRO_CHECK_CONSOLE_LOG | grep 'BUNDLE_URL' | cut -d = -f 2)
+ODL_BUILD_JOB_NUM=$(curl --fail -s ${CHANGE_DETAILS_URL} | grep -Eo "netvirt-distribution-check-${DIST}/[0-9]+" | tail -1 | grep -Eo [0-9]+)
+DISTRO_CHECK_CONSOLE_LOG="https://logs.opendaylight.org/releng/jenkins092/netvirt-distribution-check-${DIST}/${ODL_BUILD_JOB_NUM}/console.log.gz"
+NETVIRT_ARTIFACT_URL=$(curl --fail -s --compressed ${DISTRO_CHECK_CONSOLE_LOG} | grep 'BUNDLE_URL' | cut -d = -f 2)
echo -e "URL to artifact is\n\t$NETVIRT_ARTIFACT_URL"
@@ -28,4 +34,4 @@ unzip $ODL_ZIP
tar czf /tmp/${NETVIRT_ARTIFACT} $(echo $ODL_ZIP | sed -n 's/\.zip//p')
echo "Download complete"
-ls -al /tmp/${NETVIRT_ARTIFACT}
\ No newline at end of file
+ls -al /tmp/${NETVIRT_ARTIFACT}
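
The new branch handling above derives two values from ODL_BRANCH: the distribution name used in the job and log URLs (e.g. 'carbon') and a URL-encoded branch for the Gerrit REST call (e.g. 'stable%2Fcarbon'). A minimal Python paraphrase of the two sed expressions, assuming the usual 'stable/<name>' branch convention (illustrative only; the job itself stays in bash):

    import re

    def odl_branch_params(odl_branch):
        # 'stable/carbon' -> ('carbon', 'stable%2Fcarbon')
        match = re.match(r'^([a-zA-Z]+)/([a-zA-Z]+)$', odl_branch)
        if odl_branch != 'master' and match:
            return match.group(2), match.group(1) + '%2F' + match.group(2)
        return 'nitrogen', odl_branch  # master keeps the nitrogen default

    print(odl_branch_params('stable/carbon'))  # ('carbon', 'stable%2Fcarbon')
    print(odl_branch_params('master'))         # ('nitrogen', 'master')
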
diff --git a/jjb/3rd_party_ci/odl-netvirt.yml b/jjb/3rd_party_ci/odl-netvirt.yml
index ac15fe707..a937acbed 100644
--- a/jjb/3rd_party_ci/odl-netvirt.yml
+++ b/jjb/3rd_party_ci/odl-netvirt.yml
@@ -12,6 +12,10 @@
branch: '{stream}'
gs-pathname: ''
disabled: false
+ - carbon:
+ branch: 'stable/carbon'
+ gs-pathname: ''
+ disabled: false
#####################################
# patch verification phases
#####################################
@@ -111,6 +115,7 @@
- name: 'odl-netvirt-verify-virtual-install-netvirt-{stream}'
current-parameters: false
predefined-parameters: |
+ ODL_BRANCH={branch}
BRANCH=$BRANCH
GERRIT_REFSPEC=$GERRIT_REFSPEC
GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
@@ -125,7 +130,7 @@
name: functest
condition: SUCCESSFUL
projects:
- - name: 'functest-netvirt-virtual-suite-{stream}'
+ - name: 'functest-netvirt-virtual-suite-master'
predefined-parameters: |
DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
FUNCTEST_SUITE_NAME=odl_netvirt
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index 5f462e9c3..cee9e5929 100755
--- a/jjb/dovetail/dovetail-run.sh
+++ b/jjb/dovetail/dovetail-run.sh
@@ -65,6 +65,11 @@ else
fi
pod_file_dir="/home/opnfv/dovetail/userconfig"
+if [ -d ${pod_file_dir} ]; then
+ sudo rm -r ${pod_file_dir}/*
+else
+ sudo mkdir -p ${pod_file_dir}
+fi
cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} -i ${INSTALLER_IP} ${options} -f ${pod_file_dir}/pod.yaml"
echo ${cmd}
${cmd}
diff --git a/jjb/securedlab/check-jinja2.sh b/jjb/securedlab/check-jinja2.sh
index 84907e5eb..57650ec28 100755
--- a/jjb/securedlab/check-jinja2.sh
+++ b/jjb/securedlab/check-jinja2.sh
@@ -5,5 +5,5 @@ for lab_configs in $(find labs/ -name 'pod.yaml'); do
while IFS= read -r jinja_templates; do
echo "./utils/generate_config.py -y $lab_configs -j $jinja_templates"
./utils/generate_config.py -y $lab_configs -j $jinja_templates
- done < <(find installers/ -name 'pod_config.yaml.j2')
+ done < <(find installers/ -name '*.j2')
done
diff --git a/jjb/xci/bifrost-provision.sh b/jjb/xci/bifrost-provision.sh
index 4724c2ee5..b37da9059 100755
--- a/jjb/xci/bifrost-provision.sh
+++ b/jjb/xci/bifrost-provision.sh
@@ -82,13 +82,13 @@ sudo -E ./scripts/destroy-env.sh
# provision VMs for the flavor
cd /opt/bifrost
-sudo -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
# list the provisioned VMs
cd /opt/bifrost
source env-vars
ironic node-list
-virsh list
+sudo -H -E virsh list
echo "OpenStack nodes are provisioned!"
# here we have to do something in order to capture what was the working sha1
diff --git a/jjb/xci/bifrost-verify.sh b/jjb/xci/bifrost-verify.sh
index 29af7ca3b..18019a7cb 100755
--- a/jjb/xci/bifrost-verify.sh
+++ b/jjb/xci/bifrost-verify.sh
@@ -117,10 +117,10 @@ sudo -H -E ./scripts/destroy-env.sh
# provision 3 VMs; xcimaster, controller, and compute
cd /opt/bifrost
-sudo -H -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
# list the provisioned VMs
cd /opt/bifrost
source env-vars
ironic node-list
-virsh list
+sudo -H -E virsh list
diff --git a/prototypes/bifrost/playbooks/opnfv-virtual.yaml b/prototypes/bifrost/playbooks/opnfv-virtual.yaml
index 699c96698..94de628a6 100644
--- a/prototypes/bifrost/playbooks/opnfv-virtual.yaml
+++ b/prototypes/bifrost/playbooks/opnfv-virtual.yaml
@@ -59,12 +59,7 @@
dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
when: create_image_via_dib | bool == true and transform_boot_image | bool == false
- role: bifrost-keystone-client-config
- # NOTE(hwoarang): This should be ansible_env.SUDO_USER like in the
- # upstream playbook. However, we run ansible as root (ie with sudo)
- # so clouds.yaml will be placed in the user's home directory (see
- # the bifrost-keystone-client-config role) and then ansible will look
- # for one in /root and fail. As such we hardcode the user to be 'root'.
- user: "root"
+ user: "{{ ansible_env.SUDO_USER }}"
clouds:
bifrost:
config_username: "{{ ironic.keystone.default_username }}"
diff --git a/prototypes/xci/file/install-ansible.sh b/prototypes/xci/file/install-ansible.sh
new file mode 100644
index 000000000..daa7f516d
--- /dev/null
+++ b/prototypes/xci/file/install-ansible.sh
@@ -0,0 +1,136 @@
+#!/bin/bash
+# NOTE(hwoarang): Most parts of this file were taken from the
+# bifrost repository (scripts/install-deps.sh). This script contains all
+# the necessary distro-specific code to install ansible and its dependencies.
+
+set -eu
+
+declare -A PKG_MAP
+
+CHECK_CMD_PKGS=(
+ libffi
+ libopenssl
+ net-tools
+ python-devel
+)
+
+# Check zypper before apt-get in case zypper-aptitude
+# is installed
+if [ -x '/usr/bin/zypper' ]; then
+ OS_FAMILY="Suse"
+ INSTALLER_CMD="sudo -H -E zypper install -y"
+ CHECK_CMD="zypper search --match-exact --installed"
+ PKG_MAP=(
+ [gcc]=gcc
+ [git]=git
+ [libffi]=libffi-devel
+ [libopenssl]=libopenssl-devel
+ [net-tools]=net-tools
+ [python]=python
+ [python-devel]=python-devel
+ [venv]=python-virtualenv
+ [wget]=wget
+ )
+ EXTRA_PKG_DEPS=( python-xml )
+ # NOTE (cinerama): we can't install python without removing this package
+ # if it exists
+ if $(${CHECK_CMD} patterns-openSUSE-minimal_base-conflicts &> /dev/null); then
+ sudo -H zypper remove -y patterns-openSUSE-minimal_base-conflicts
+ fi
+elif [ -x '/usr/bin/apt-get' ]; then
+ OS_FAMILY="Debian"
+ INSTALLER_CMD="sudo -H -E apt-get -y install"
+ CHECK_CMD="dpkg -l"
+ PKG_MAP=( [gcc]=gcc
+ [git]=git
+ [libffi]=libffi-dev
+ [libopenssl]=libssl-dev
+ [net-tools]=net-tools
+ [python]=python-minimal
+ [python-devel]=libpython-dev
+ [venv]=python-virtualenv
+ [wget]=wget
+ )
+ EXTRA_PKG_DEPS=()
+elif [ -x '/usr/bin/dnf' ] || [ -x '/usr/bin/yum' ]; then
+ OS_FAMILY="RedHat"
+ PKG_MANAGER=$(which dnf || which yum)
+ INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -y install"
+ CHECK_CMD="rpm -q"
+ PKG_MAP=(
+ [gcc]=gcc
+ [git]=git
+ [libffi]=libffi-devel
+ [libopenssl]=openssl-devel
+ [net-tools]=net-tools
+ [python]=python
+ [python-devel]=python-devel
+ [venv]=python-virtualenv
+ [wget]=wget
+ )
+ EXTRA_PKG_DEPS=()
+else
+ echo "ERROR: Supported package manager not found. Supported: apt,yum,zypper"
+fi
+
+if ! $(python --version &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[python]}
+fi
+if ! $(gcc -v &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[gcc]}
+fi
+if ! $(git --version &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[git]}
+fi
+if ! $(wget --version &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[wget]}
+fi
+
+for pkg in ${CHECK_CMD_PKGS[@]}; do
+ if ! $(${CHECK_CMD} ${PKG_MAP[$pkg]} &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[$pkg]}
+ fi
+done
+
+if [ -n "${EXTRA_PKG_DEPS-}" ]; then
+ for pkg in ${EXTRA_PKG_DEPS}; do
+ if ! $(${CHECK_CMD} ${pkg} &>/dev/null); then
+ ${INSTALLER_CMD} ${pkg}
+ fi
+ done
+fi
+
+# If we're using a venv, we need to work around sudo not
+# keeping the path even with -E.
+PYTHON=$(which python)
+
+# To install python packages, we need pip.
+#
+# We can't use the apt packaged version of pip since
+# older versions of pip are incompatible with
+# requests, one of our indirect dependencies (bug 1459947).
+#
+# Note(cinerama): We use pip to install an updated pip plus our
+# other python requirements. pip breakages can seriously impact us,
+# so we've chosen to install/upgrade pip here rather than in
+# requirements (which are synced automatically from the global ones)
+# so we can quickly and easily adjust version parameters.
+# See bug 1536627.
+#
+# Note(cinerama): If pip is linked to pip3, the rest of the install
+# won't work. Remove the alternatives. This is due to ansible's
+# python 2.x requirement.
+if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then
+ sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip)
+fi
+
+if ! which pip; then
+ wget -O /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
+ sudo -H -E ${PYTHON} /tmp/get-pip.py
+fi
+
+PIP=$(which pip)
+
+sudo -H -E ${PIP} install "pip>6.0"
+
+pip install ansible==$XCI_ANSIBLE_PIP_VERSION
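
The new script uses one check-then-install pattern for all three distro families: PKG_MAP translates a generic package name to the distro-specific one, CHECK_CMD probes for it, and INSTALLER_CMD runs only on a miss. (Note that the final else branch only echoes an error without exiting, so on an unsupported distro the script may fail later on the unset CHECK_CMD/INSTALLER_CMD under set -eu rather than exiting cleanly.) A rough Python 3 sketch of that loop under the Debian settings, with illustrative package names:

    import subprocess

    PKG_MAP = {'libffi': 'libffi-dev', 'libopenssl': 'libssl-dev'}
    CHECK_CMD = ['dpkg', '-l']
    INSTALLER_CMD = ['sudo', '-H', '-E', 'apt-get', '-y', 'install']

    for pkg in ('libffi', 'libopenssl'):
        real_name = PKG_MAP[pkg]
        # install only when the check command reports the package missing
        if subprocess.call(CHECK_CMD + [real_name],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL) != 0:
            subprocess.check_call(INSTALLER_CMD + [real_name])
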
diff --git a/prototypes/xci/xci-deploy.sh b/prototypes/xci/xci-deploy.sh
index 2fd9be022..718ed73c2 100755
--- a/prototypes/xci/xci-deploy.sh
+++ b/prototypes/xci/xci-deploy.sh
@@ -50,7 +50,7 @@ echo "-------------------------------------------------------------------------"
#-------------------------------------------------------------------------------
# Install ansible on localhost
#-------------------------------------------------------------------------------
-pip install ansible==$XCI_ANSIBLE_PIP_VERSION
+source file/install-ansible.sh
# TODO: The xci playbooks can be put into a playbook which will be done later.
diff --git a/utils/create_pod_file.py b/utils/create_pod_file.py
index 7e30cc639..197e4933c 100644
--- a/utils/create_pod_file.py
+++ b/utils/create_pod_file.py
@@ -58,7 +58,7 @@ def create_file(handler):
Other installers use key file of each node.
"""
if not os.path.exists(os.path.dirname(args.filepath)):
- os.path.makedirs(os.path.dirname(args.filepath))
+ os.makedirs(os.path.dirname(args.filepath))
nodes = handler.nodes
node_list = []
index = 1
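
The one-line fix above corrects an AttributeError: directory creation lives in the os module itself, not in os.path. For reference, the working idiom (the path shown is illustrative):

    import os

    filepath = '/home/opnfv/dovetail/userconfig/pod.yaml'
    directory = os.path.dirname(filepath)
    if not os.path.exists(directory):
        os.makedirs(directory)  # os.path.makedirs() does not exist
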
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index af1d1d8a5..94e7f2f3e 100755
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -9,10 +9,8 @@
import datetime
import jinja2
import os
-import requests
import sys
import time
-import yaml
import testCase as tc
import scenarioResult as sr
@@ -43,9 +41,7 @@ log_level = rp_utils.get_config('general.log.log_level')
exclude_noha = rp_utils.get_config('functest.exclude_noha')
exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
-response = requests.get(cf)
-
-functest_yaml_config = yaml.safe_load(response.text)
+functest_yaml_config = rp_utils.getFunctestConfig()
logger.info("*******************************************")
logger.info("* *")
@@ -69,128 +65,117 @@ config_tiers = functest_yaml_config.get("tiers")
for tier in config_tiers:
if tier['order'] >= 0 and tier['order'] < 2:
for case in tier['testcases']:
- if case['name'] not in blacklist:
- testValid.append(tc.TestCase(case['name'],
+ if case['case_name'] not in blacklist:
+ testValid.append(tc.TestCase(case['case_name'],
"functest",
case['dependencies']))
elif tier['order'] == 2:
for case in tier['testcases']:
- if case['name'] not in blacklist:
- testValid.append(tc.TestCase(case['name'],
- case['name'],
+ if case['case_name'] not in blacklist:
+ testValid.append(tc.TestCase(case['case_name'],
+ case['case_name'],
case['dependencies']))
elif tier['order'] > 2:
for case in tier['testcases']:
- if case['name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['name'],
+ if case['case_name'] not in blacklist:
+ otherTestCases.append(tc.TestCase(case['case_name'],
"functest",
case['dependencies']))
logger.debug("Functest reporting start")
+
# For all the versions
for version in versions:
# For all the installers
+ scenario_directory = "./display/" + version + "/functest/"
+ scenario_file_name = scenario_directory + "scenario_history.txt"
+
+ # check that the directory exists, if not create it
+ # (first run on new version)
+ if not os.path.exists(scenario_directory):
+ os.makedirs(scenario_directory)
+
+ # initiate scenario file if it does not exist
+ if not os.path.isfile(scenario_file_name):
+ with open(scenario_file_name, "a") as my_file:
+ logger.debug("Create scenario file: %s" % scenario_file_name)
+ my_file.write("date,scenario,installer,detail,score\n")
+
for installer in installers:
+
# get scenarios
scenario_results = rp_utils.getScenarios(healthcheck,
installer,
version)
- scenario_stats = rp_utils.getScenarioStats(scenario_results)
- items = {}
- scenario_result_criteria = {}
- scenario_directory = "./display/" + version + "/functest/"
- scenario_file_name = scenario_directory + "scenario_history.txt"
-
- # check that the directory exists, if not create it
- # (first run on new version)
- if not os.path.exists(scenario_directory):
- os.makedirs(scenario_directory)
-
- # initiate scenario file if it does not exist
- if not os.path.isfile(scenario_file_name):
- with open(scenario_file_name, "a") as my_file:
- logger.debug("Create scenario file: %s" % scenario_file_name)
- my_file.write("date,scenario,installer,detail,score\n")
-
- # For all the scenarios get results
- for s, s_result in scenario_results.items():
- logger.info("---------------------------------")
- logger.info("installer %s, version %s, scenario %s:" %
- (installer, version, s))
- logger.debug("Scenario results: %s" % s_result)
-
- # Green or Red light for a given scenario
- nb_test_runnable_for_this_scenario = 0
- scenario_score = 0
- # url of the last jenkins log corresponding to a given
- # scenario
- s_url = ""
- if len(s_result) > 0:
- build_tag = s_result[len(s_result)-1]['build_tag']
- logger.debug("Build tag: %s" % build_tag)
- s_url = rp_utils.getJenkinsUrl(build_tag)
- if s_url is None:
- s_url = "http://testresultS.opnfv.org/reporting"
- logger.info("last jenkins url: %s" % s_url)
- testCases2BeDisplayed = []
- # Check if test case is runnable / installer, scenario
- # for the test case used for Scenario validation
- try:
- # 1) Manage the test cases for the scenario validation
- # concretely Tiers 0-3
- for test_case in testValid:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- logger.debug("testcase %s (%s) is %s" %
- (test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable))
- time.sleep(1)
- if test_case.isRunnable:
- dbName = test_case.getDbName()
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- nb_test_runnable_for_this_scenario += 1
- logger.info(" Searching results for case %s " %
- (displayName))
- result = rp_utils.getResult(dbName, installer,
- s, version)
- # if no result set the value to 0
- if result < 0:
- result = 0
- logger.info(" >>>> Test score = " + str(result))
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 1))
- scenario_score = scenario_score + result
-
- # 2) Manage the test cases for the scenario qualification
- # concretely Tiers > 3
- for test_case in otherTestCases:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- logger.debug("testcase %s (%s) is %s" %
- (test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable))
- time.sleep(1)
- if test_case.isRunnable:
- dbName = test_case.getDbName()
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- logger.info(" Searching results for case %s " %
- (displayName))
- result = rp_utils.getResult(dbName, installer,
- s, version)
- # at least 1 result for the test
- if result > -1:
+
+ # get nb of supported architecture (x86, aarch64)
+ architectures = rp_utils.getArchitectures(scenario_results)
+ logger.info("Supported architectures: {}".format(architectures))
+
+ for architecture in architectures:
+ logger.info("architecture: {}".format(architecture))
+ # Consider only the results for the selected architecture
+ # i.e drop x86 for aarch64 and vice versa
+ filter_results = rp_utils.filterArchitecture(scenario_results,
+ architecture)
+ scenario_stats = rp_utils.getScenarioStats(filter_results)
+ items = {}
+ scenario_result_criteria = {}
+
+ # in case of more than 1 architecture supported
+ # precise the architecture
+ installer_display = installer
+ if (len(architectures) > 1):
+ installer_display = installer + "@" + architecture
+
+ # For all the scenarios get results
+ for s, s_result in filter_results.items():
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s:" %
+ (installer, version, s))
+ logger.debug("Scenario results: %s" % s_result)
+
+ # Green or Red light for a given scenario
+ nb_test_runnable_for_this_scenario = 0
+ scenario_score = 0
+ # url of the last jenkins log corresponding to a given
+ # scenario
+ s_url = ""
+ if len(s_result) > 0:
+ build_tag = s_result[len(s_result)-1]['build_tag']
+ logger.debug("Build tag: %s" % build_tag)
+ s_url = rp_utils.getJenkinsUrl(build_tag)
+ if s_url is None:
+ s_url = "http://testresultS.opnfv.org/reporting"
+ logger.info("last jenkins url: %s" % s_url)
+ testCases2BeDisplayed = []
+ # Check if test case is runnable / installer, scenario
+ # for the test case used for Scenario validation
+ try:
+ # 1) Manage the test cases for the scenario validation
+ # concretely Tiers 0-3
+ for test_case in testValid:
+ test_case.checkRunnable(installer, s,
+ test_case.getConstraints())
+ logger.debug("testcase %s (%s) is %s" %
+ (test_case.getDisplayName(),
+ test_case.getName(),
+ test_case.isRunnable))
+ time.sleep(1)
+ if test_case.isRunnable:
+ dbName = test_case.getDbName()
+ name = test_case.getName()
+ displayName = test_case.getDisplayName()
+ project = test_case.getProject()
+ nb_test_runnable_for_this_scenario += 1
+ logger.info(" Searching results for case %s " %
+ (displayName))
+ result = rp_utils.getResult(dbName, installer,
+ s, version)
+ # if no result set the value to 0
+ if result < 0:
+ result = 0
+ logger.info(" >>>> Test score = " + str(result))
test_case.setCriteria(result)
test_case.setIsRunnable(True)
testCases2BeDisplayed.append(tc.TestCase(name,
@@ -198,91 +183,127 @@ for version in versions:
"",
result,
True,
- 4))
- else:
- logger.debug("No results found")
-
- items[s] = testCases2BeDisplayed
- except:
- logger.error("Error: installer %s, version %s, scenario %s" %
- (installer, version, s))
- logger.error("No data available: %s " % (sys.exc_info()[0]))
-
- # **********************************************
- # Evaluate the results for scenario validation
- # **********************************************
- # the validation criteria = nb runnable tests x 3
- # because each test case = 0,1,2 or 3
- scenario_criteria = nb_test_runnable_for_this_scenario * 3
- # if 0 runnable tests set criteria at a high value
- if scenario_criteria < 1:
- scenario_criteria = 50 # conf.MAX_SCENARIO_CRITERIA
-
- s_score = str(scenario_score) + "/" + str(scenario_criteria)
- s_score_percent = rp_utils.getScenarioPercent(scenario_score,
- scenario_criteria)
-
- s_status = "KO"
- if scenario_score < scenario_criteria:
- logger.info(">>>> scenario not OK, score = %s/%s" %
- (scenario_score, scenario_criteria))
+ 1))
+ scenario_score = scenario_score + result
+
+ # 2) Manage the test cases for the scenario qualification
+ # concretely Tiers > 3
+ for test_case in otherTestCases:
+ test_case.checkRunnable(installer, s,
+ test_case.getConstraints())
+ logger.debug("testcase %s (%s) is %s" %
+ (test_case.getDisplayName(),
+ test_case.getName(),
+ test_case.isRunnable))
+ time.sleep(1)
+ if test_case.isRunnable:
+ dbName = test_case.getDbName()
+ name = test_case.getName()
+ displayName = test_case.getDisplayName()
+ project = test_case.getProject()
+ logger.info(" Searching results for case %s " %
+ (displayName))
+ result = rp_utils.getResult(dbName, installer,
+ s, version)
+ # at least 1 result for the test
+ if result > -1:
+ test_case.setCriteria(result)
+ test_case.setIsRunnable(True)
+ testCases2BeDisplayed.append(tc.TestCase(
+ name,
+ project,
+ "",
+ result,
+ True,
+ 4))
+ else:
+ logger.debug("No results found")
+
+ items[s] = testCases2BeDisplayed
+ except:
+ logger.error("Error: installer %s, version %s, scenario %s"
+ % (installer, version, s))
+ logger.error("No data available: %s" % (sys.exc_info()[0]))
+
+ # **********************************************
+ # Evaluate the results for scenario validation
+ # **********************************************
+ # the validation criteria = nb runnable tests x 3
+ # because each test case = 0,1,2 or 3
+ scenario_criteria = nb_test_runnable_for_this_scenario * 3
+ # if 0 runnable tests set criteria at a high value
+ if scenario_criteria < 1:
+ scenario_criteria = 50 # conf.MAX_SCENARIO_CRITERIA
+
+ s_score = str(scenario_score) + "/" + str(scenario_criteria)
+ s_score_percent = rp_utils.getScenarioPercent(
+ scenario_score,
+ scenario_criteria)
+
s_status = "KO"
- else:
- logger.info(">>>>> scenario OK, save the information")
- s_status = "OK"
- path_validation_file = ("./display/" + version +
- "/functest/" +
- "validated_scenario_history.txt")
- with open(path_validation_file, "a") as f:
- time_format = "%Y-%m-%d %H:%M"
- info = (datetime.datetime.now().strftime(time_format) +
- ";" + installer + ";" + s + "\n")
+ if scenario_score < scenario_criteria:
+ logger.info(">>>> scenario not OK, score = %s/%s" %
+ (scenario_score, scenario_criteria))
+ s_status = "KO"
+ else:
+ logger.info(">>>>> scenario OK, save the information")
+ s_status = "OK"
+ path_validation_file = ("./display/" + version +
+ "/functest/" +
+ "validated_scenario_history.txt")
+ with open(path_validation_file, "a") as f:
+ time_format = "%Y-%m-%d %H:%M"
+ info = (datetime.datetime.now().strftime(time_format) +
+ ";" + installer_display + ";" + s + "\n")
+ f.write(info)
+
+ # Save daily results in a file
+ with open(scenario_file_name, "a") as f:
+ info = (reportingDate + "," + s + "," + installer_display +
+ "," + s_score + "," +
+ str(round(s_score_percent)) + "\n")
f.write(info)
- # Save daily results in a file
- with open(scenario_file_name, "a") as f:
- info = (reportingDate + "," + s + "," + installer +
- "," + s_score + "," +
- str(round(s_score_percent)) + "\n")
- f.write(info)
-
- scenario_result_criteria[s] = sr.ScenarioResult(s_status,
- s_score,
- s_score_percent,
- s_url)
- logger.info("--------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(
- loader=templateLoader, autoescape=True)
-
- TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_stats=scenario_stats,
- scenario_results=scenario_result_criteria,
- items=items,
- installer=installer,
- period=period,
- version=version,
- date=reportingDate)
-
- with open("./display/" + version +
- "/functest/status-" + installer + ".html", "wb") as fh:
- fh.write(outputText)
-
- logger.info("Manage export CSV & PDF")
- rp_utils.export_csv(scenario_file_name, installer, version)
- logger.error("CSV generated...")
-
- # Generate outputs for export
- # pdf
- # TODO Change once web site updated...use the current one
- # to test pdf production
- url_pdf = rp_utils.get_config('general.url')
- pdf_path = ("./display/" + version +
- "/functest/status-" + installer + ".html")
- pdf_doc_name = ("./display/" + version +
- "/functest/status-" + installer + ".pdf")
- rp_utils.export_pdf(pdf_path, pdf_doc_name)
- logger.info("PDF generated...")
+ scenario_result_criteria[s] = sr.ScenarioResult(
+ s_status,
+ s_score,
+ s_score_percent,
+ s_url)
+ logger.info("--------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ templateEnv = jinja2.Environment(
+ loader=templateLoader, autoescape=True)
+
+ TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(
+ scenario_stats=scenario_stats,
+ scenario_results=scenario_result_criteria,
+ items=items,
+ installer=installer_display,
+ period=period,
+ version=version,
+ date=reportingDate)
+
+ with open("./display/" + version +
+ "/functest/status-" +
+ installer_display + ".html", "wb") as fh:
+ fh.write(outputText)
+
+ logger.info("Manage export CSV & PDF")
+ rp_utils.export_csv(scenario_file_name, installer_display, version)
+ logger.error("CSV generated...")
+
+ # Generate outputs for export
+ # pdf
+ # TODO Change once web site updated...use the current one
+ # to test pdf production
+ url_pdf = rp_utils.get_config('general.url')
+ pdf_path = ("./display/" + version +
+ "/functest/status-" + installer_display + ".html")
+ pdf_doc_name = ("./display/" + version +
+ "/functest/status-" + installer_display + ".pdf")
+ rp_utils.export_pdf(pdf_path, pdf_doc_name)
+ logger.info("PDF generated...")
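
Most of this hunk is re-indentation: the existing scenario-scoring body moves inside a new per-architecture loop, and installer_display replaces installer in every output path, so fuel results split into fuel@x86 and fuel@aarch64 pages. A condensed, runnable paraphrase of the new nesting (stub data and a local helper stand in for the rp_utils calls):

    def get_architectures(results):
        archs = ['x86']
        if any('armband' in v['build_tag']
               for values in results.values() for v in values):
            archs.append('aarch64')
        return archs

    versions = ['master']
    installers = ['fuel']
    scenario_results = {'os-odl_l3-nofeature-ha': [{'build_tag': 'armband-1'}]}

    for version in versions:
        for installer in installers:
            architectures = get_architectures(scenario_results)
            for architecture in architectures:
                installer_display = installer
                if len(architectures) > 1:
                    installer_display = installer + "@" + architecture
                # score scenarios here, then render the per-arch page
                print("render status-%s.html for %s" % (installer_display,
                                                        version))
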
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 52046c37f..ebacfd159 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -15,27 +15,27 @@
{% for scenario in scenario_stats.iteritems() -%}
var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
{%- endfor %}
-
+
// assign success rate to the gauge
function updateReadings() {
{% for scenario,iteration in scenario_stats.iteritems() -%}
gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
{%- endfor %}
}
- updateReadings();
+ updateReadings();
}
-
+
// trend line management
- d3.csv("./scenario_history.csv", function(data) {
+ d3.csv("./scenario_history.txt", function(data) {
// ***************************************
// Create the trend line
{% for scenario,iteration in scenario_stats.iteritems() -%}
- // for scenario {{scenario}}
+ // for scenario {{scenario}}
// Filter results
- var trend{{loop.index}} = data.filter(function(row) {
+ var trend{{loop.index}} = data.filter(function(row) {
return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
})
- // Parse the date
+ // Parse the date
trend{{loop.index}}.forEach(function(d) {
d.date = parseDate(d.date);
d.score = +d.score
@@ -44,7 +44,7 @@
var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
// ****************************************
{%- endfor %}
- });
+ });
if ( !window.isLoaded ) {
window.addEventListener("load", function() {
onDocumentReady();
@@ -61,7 +61,7 @@ $(document).ready(function (){
});
})
</script>
-
+
</head>
<body>
<div class="container">
@@ -72,8 +72,8 @@ $(document).ready(function (){
<li class="active"><a href="../../index.html">Home</a></li>
<li><a href="status-apex.html">Apex</a></li>
<li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-daisy.html">Daisy</a></li>
- <li><a href="status-fuel.html">Fuel</a></li>
+ <li><a href="status-fuel@x86.html">fuel@x86</a></li>
+ <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
<li><a href="status-joid.html">Joid</a></li>
</ul>
</nav>
diff --git a/utils/test/reporting/reporting.yaml b/utils/test/reporting/reporting.yaml
index 8c5ce1383..73781eb56 100644
--- a/utils/test/reporting/reporting.yaml
+++ b/utils/test/reporting/reporting.yaml
@@ -3,7 +3,6 @@ general:
installers:
- apex
- compass
- - daisy
- fuel
- joid
@@ -37,7 +36,6 @@ functest:
blacklist:
- ovno
- security_scan
- - rally_sanity
- healthcheck
- odl_netvirt
- aaa
@@ -45,13 +43,12 @@ functest:
- orchestra_ims
- juju_epc
- orchestra
- - promise
max_scenario_criteria: 50
test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
log_level: ERROR
jenkins_url: https://build.opnfv.org/ci/view/functest/job/
- exclude_noha: "False"
- exclude_virtual: "False"
+ exclude_noha: False
+ exclude_virtual: False
yardstick:
test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
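
Unquoting these flags is more than cosmetic: YAML parses a bare False as a boolean, whereas the quoted "False" is a non-empty string and therefore truthy when the reporting code tests it. A quick illustration (requires PyYAML):

    import yaml

    print(bool(yaml.safe_load('exclude_noha: "False"')['exclude_noha']))
    # -> True: the string "False" is truthy
    print(bool(yaml.safe_load('exclude_noha: False')['exclude_noha']))
    # -> False: a real boolean
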
diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py
index aab7a3f4f..98ade2c30 100644
--- a/utils/test/reporting/utils/reporting_utils.py
+++ b/utils/test/reporting/utils/reporting_utils.py
@@ -10,6 +10,7 @@ from urllib2 import Request, urlopen, URLError
import logging
import json
import os
+import requests
import pdfkit
import yaml
@@ -299,6 +300,44 @@ def getScenarioPercent(scenario_score, scenario_criteria):
# *********
+# Functest
+# *********
+def getFunctestConfig(version=""):
+ config_file = get_config('functest.test_conf') + version
+ response = requests.get(config_file)
+ return yaml.safe_load(response.text)
+
+
+def getArchitectures(scenario_results):
+ supported_arch = ['x86']
+ if (len(scenario_results) > 0):
+ for scenario_result in scenario_results.values():
+ for value in scenario_result:
+ if ("armband" in value['build_tag']):
+ supported_arch.append('aarch64')
+ return supported_arch
+ return supported_arch
+
+
+def filterArchitecture(results, architecture):
+ filtered_results = {}
+ for name, results in results.items():
+ filtered_values = []
+ for value in results:
+ if (architecture is "x86"):
+ # drop aarch64 results
+ if ("armband" not in value['build_tag']):
+ filtered_values.append(value)
+ elif(architecture is "aarch64"):
+ # drop x86 results
+ if ("armband" in value['build_tag']):
+ filtered_values.append(value)
+ if (len(filtered_values) > 0):
+ filtered_results[name] = filtered_values
+ return filtered_results
+
+
+# *********
# Yardstick
# *********
def subfind(given_list, pattern_list):
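
Two caveats in the new helpers above. getArchitectures() returns the same list from two places, so the second return is redundant. More importantly, filterArchitecture() compares strings with 'is', which tests object identity rather than equality; it only works because CPython happens to intern short string literals, and '==' is the robust idiom. It also rebinds its 'results' parameter inside the loop. A tightened sketch of the same filtering logic, assuming only the two architectures used here:

    def filter_architecture(results, architecture):
        filtered_results = {}
        for name, values in results.items():
            wants_arm = (architecture == 'aarch64')  # armband == aarch64 runs
            filtered_values = [v for v in values
                               if ('armband' in v['build_tag']) == wants_arm]
            if filtered_values:
                filtered_results[name] = filtered_values
        return filtered_results
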