-rw-r--r--  jjb/doctor/doctor.yml | 6
-rw-r--r--  jjb/dovetail/dovetail-ci-jobs.yml | 177
-rwxr-xr-x  jjb/dovetail/dovetail-cleanup.sh | 20
-rwxr-xr-x  jjb/dovetail/dovetail-run.sh | 52
-rw-r--r--  jjb/opnfvdocs/project.cfg | 2
-rwxr-xr-x  prototypes/bifrost/scripts/destroy-env.sh | 4
-rw-r--r--  utils/test/dashboard.tar.gz | bin 0 -> 14627 bytes
-rw-r--r--  utils/test/dashboard/backup-db.sh (renamed from utils/test/scripts/backup-db.sh) | 0
-rw-r--r--  utils/test/dashboard/dashboard/__init__.py | 0
-rw-r--r--  utils/test/dashboard/dashboard/common/__init__.py | 0
-rw-r--r--  utils/test/dashboard/dashboard/common/elastic_access.py (renamed from utils/test/scripts/shared_utils.py) | 0
-rw-r--r--  utils/test/dashboard/dashboard/common/logger_utils.py (renamed from utils/test/scripts/logger_utils.py) | 4
-rw-r--r--  utils/test/dashboard/dashboard/conf/__init__.py | 0
-rw-r--r--  utils/test/dashboard/dashboard/conf/config.py (renamed from utils/test/scripts/config.py) | 2
-rw-r--r--  utils/test/dashboard/dashboard/conf/testcases.py (renamed from utils/test/scripts/testcases_parser.py) | 2
-rw-r--r--  utils/test/dashboard/dashboard/elastic2kibana/__init__.py | 0
-rw-r--r--  utils/test/dashboard/dashboard/elastic2kibana/main.py (renamed from utils/test/scripts/create_kibana_dashboards.py) | 32
-rw-r--r--  utils/test/dashboard/dashboard/elastic2kibana_main.py | 4
-rw-r--r--  utils/test/dashboard/dashboard/functest/__init__.py | 0
-rw-r--r--  utils/test/dashboard/dashboard/functest/testcases.yaml (renamed from utils/test/scripts/testcases.yaml) | 0
-rw-r--r--  utils/test/dashboard/dashboard/mongo2elastic/__init__.py | 0
-rw-r--r--  utils/test/dashboard/dashboard/mongo2elastic/format.py (renamed from utils/test/scripts/mongo2elastic_format.py) | 0
-rw-r--r--  utils/test/dashboard/dashboard/mongo2elastic/main.py (renamed from utils/test/scripts/mongo_to_elasticsearch.py) | 25
-rw-r--r--  utils/test/dashboard/dashboard/mongo2elastic_main.py | 4
-rw-r--r--  utils/test/dashboard/etc/config.ini (renamed from utils/test/scripts/config.ini) | 2
-rw-r--r--  utils/test/dashboard/kibana_cleanup.py (renamed from utils/test/scripts/kibana_cleanup.py) | 6
-rwxr-xr-x  utils/test/reporting/functest/reporting-status.py | 41
-rw-r--r--  utils/test/reporting/functest/reportingConf.py | 4
-rw-r--r--  utils/test/reporting/functest/reportingUtils.py | 20
-rw-r--r--  utils/test/reporting/functest/scenarioResult.py | 8
-rw-r--r--  utils/test/reporting/functest/template/index-status-tmpl.html | 4
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py | 8
-rwxr-xr-x  utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py | 199
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py | 76
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py | 105
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py | 472
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py | 100
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py | 121
-rwxr-xr-x  utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py | 121
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py | 209
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/resources/dashboard_handlers.py | 120
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/router/url_mappings.py | 10
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py | 78
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard_project.py | 20
44 files changed, 361 insertions(+), 1697 deletions(-)
diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index 2010e1229..f93ac9bef 100644
--- a/jjb/doctor/doctor.yml
+++ b/jjb/doctor/doctor.yml
@@ -136,3 +136,9 @@
TESTCASE_OPTIONS=-e INSPECTOR_TYPE=congress -v $WORKSPACE:$HOME/opnfv/repos/doctor
block: true
same-node: true
+ - shell: |
+ logfile=$HOME/opnfv/functest/results/{stream}/doctor.log
+ echo
+ echo "[$logfile]"
+ echo
+ [ -e $logfile ] && cat $logfile
diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml
new file mode 100644
index 000000000..9d2f69da9
--- /dev/null
+++ b/jjb/dovetail/dovetail-ci-jobs.yml
@@ -0,0 +1,177 @@
+###################################
+# job configuration for dovetail
+###################################
+- project:
+ name: dovetail
+
+ project: '{name}'
+
+#---------------------------------------
+# BRANCH ANCHORS
+#---------------------------------------
+# 1)the stream/branch here represents the SUT(System Under Test) stream/branch
+# 2)docker-tag is the docker tag of dovetail(only master by now, then all latest used)
+# the dovetail stream is one-to-one mapping with dovetail docker-tag
+# the dovetail is not sync with A/B/C release
+#
+ master: &master
+ stream: master
+ branch: '{stream}'
+ gs-pathname: ''
+ docker-tag: 'latest'
+ colorado: &colorado
+ stream: colorado
+ branch: 'stable/{stream}'
+ gs-pathname: '{stream}'
+ docker-tag: 'latest'
+
+#-----------------------------------
+# POD, PLATFORM, AND BRANCH MAPPING
+#-----------------------------------
+# CI PODs
+# This section should only contain the SUTs
+# that have been switched using labels for slaves
+#------------------------------------------------
+# the pods, SUTs listed here are just examples to
+# let the dovetail tool run, there can be more ways beside CI to
+# run the dovetail tool.
+# pods, SUTs will be added/adjusted when needed
+ pod:
+# fuel CI PODs
+ - baremetal:
+ slave-label: fuel-baremetal
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - virtual:
+ slave-label: fuel-virtual
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - baremetal:
+ slave-label: fuel-baremetal
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
+ - virtual:
+ slave-label: fuel-virtual
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
+#compass CI PODs
+ - baremetal:
+ slave-label: compass-baremetal
+ SUT: compass
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - virtual:
+ slave-label: compass-virtual
+ SUT: compass
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - baremetal:
+ slave-label: compass-baremetal
+ SUT: compass
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
+ - virtual:
+ slave-label: compass-virtual
+ SUT: compass
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
+#--------------------------------
+# None-CI PODs
+#--------------------------------
+ - huawei-pod5:
+ slave-label: '{pod}'
+ SUT: compass
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+#--------------------------------
+ testsuite:
+ - 'basic'
+
+ jobs:
+ - 'dovetail-{SUT}-{pod}-{testsuite}-{stream}'
+
+################################
+# job templates
+################################
+- job-template:
+ name: 'dovetail-{SUT}-{pod}-{testsuite}-{stream}'
+
+ disabled: false
+
+ concurrent: true
+
+ properties:
+ - throttle:
+ enabled: true
+ max-per-node: 1
+ option: 'project'
+
+ wrappers:
+ - build-name:
+ name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+ - timeout:
+ timeout: 180
+ abort: true
+
+ triggers:
+ - '{auto-trigger-name}'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ - '{SUT}-defaults'
+ - '{slave-label}-defaults'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: 'os-nosdn-nofeature-ha'
+ - string:
+ name: DOCKER_TAG
+ default: '{docker-tag}'
+ description: 'Tag to pull docker image'
+ - string:
+ name: CI_DEBUG
+ default: 'true'
+ description: "Show debug output information"
+
+ scm:
+ - git-scm:
+ credentials-id: '{ssh-credentials}'
+ refspec: ''
+ branch: '{branch}'
+
+ builders:
+ - description-setter:
+ description: "POD: $NODE_NAME"
+ - 'dovetail-cleanup'
+ - 'dovetail-{testsuite}'
+
+ publishers:
+ - archive:
+ artifacts: 'results/**/*'
+ allow-empty: true
+ fingerprint: true
+
+########################
+# builder macros
+########################
+- builder:
+ name: dovetail-basic
+ builders:
+ - shell:
+ !include-raw: ./dovetail-run.sh
+
+- builder:
+ name: dovetail-fetch-os-creds
+ builders:
+ - shell:
+ !include-raw: ../../utils/fetch_os_creds.sh
+
+- builder:
+ name: dovetail-cleanup
+ builders:
+ - shell:
+ !include-raw: ./dovetail-cleanup.sh
diff --git a/jjb/dovetail/dovetail-cleanup.sh b/jjb/dovetail/dovetail-cleanup.sh
new file mode 100755
index 000000000..297222bb3
--- /dev/null
+++ b/jjb/dovetail/dovetail-cleanup.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+
+echo "Cleaning up docker containers/images..."
+# Remove previous running containers if exist
+if [[ ! -z $(docker ps -a | grep opnfv/dovetail) ]]; then
+ echo "Removing existing opnfv/dovetail containers..."
+ docker ps -a | grep opnfv/dovetail | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+# Remove existing images if exist
+if [[ ! -z $(docker images | grep opnfv/dovetail) ]]; then
+ echo "Docker images to remove:"
+ docker images | head -1 && docker images | grep opnfv/dovetail
+ image_tags=($(docker images | grep opnfv/dovetail | awk '{print $2}'))
+ for tag in "${image_tags[@]}"; do
+ echo "Removing docker image opnfv/dovetail:$tag..."
+ docker rmi opnfv/dovetail:$tag >$redirect
+ done
+fi
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
new file mode 100755
index 000000000..6453425ce
--- /dev/null
+++ b/jjb/dovetail/dovetail-run.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+#the noun INSTALLER is used in community, here is just the example to run.
+#multi-platforms are supported.
+
+set -e
+[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+
+# labconfig is used only for joid
+labconfig=""
+sshkey=""
+if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
+ instack_mac=$(sudo virsh domiflist undercloud | grep default | \
+ grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+ INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
+ sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+ if [[ -n $(sudo iptables -L FORWARD |grep "REJECT"|grep "reject-with icmp-port-unreachable") ]]; then
+ #note: this happens only in opnfv-lf-pod1
+ sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
+ sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
+ fi
+elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
+ # If production lab then creds may be retrieved dynamically
+ # creds are on the jumphost, always in the same folder
+ labconfig="-v $LAB_CONFIG/admin-openrc:/home/opnfv/openrc"
+ # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
+ # replace the default one by the customized one provided by jenkins config
+fi
+
+# Set iptables rule to allow forwarding return traffic for container
+if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FORWARD | awk 'NR==3' | grep RETURN 2> ${redirect}; then
+ sudo iptables -I FORWARD -j RETURN
+fi
+
+opts="--privileged=true --rm"
+envs="-e CI_DEBUG=${CI_DEBUG} \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ -v /home/opnfv/dovetail/results:/home/opnfv/dovetail/results"
+
+# Pull the image with correct tag
+echo "Dovetail: Pulling image opnfv/dovetail:${DOCKER_TAG}"
+docker pull opnfv/dovetail:$DOCKER_TAG >$redirect
+
+# Run docker
+echo "Dovetail: docker running..."
+sudo docker run ${opts} ${envs} ${labconfig} ${sshkey} opnfv/dovetail:${DOCKER_TAG} \
+"/home/opnfv/dovetail/scripts/run.py"
+
+echo "Dovetail: store results..."
+sudo cp -r /home/opnfv/dovetail/results ./
+
+echo "Dovetail: done!"
diff --git a/jjb/opnfvdocs/project.cfg b/jjb/opnfvdocs/project.cfg
index ba977f92a..7f0236123 100644
--- a/jjb/opnfvdocs/project.cfg
+++ b/jjb/opnfvdocs/project.cfg
@@ -6,6 +6,8 @@ compass4nfv
copper
conductor
doctor
+domino
+dovetail
dpacc
escalator
fastpathmetrics
diff --git a/prototypes/bifrost/scripts/destroy-env.sh b/prototypes/bifrost/scripts/destroy-env.sh
index 86d7bc487..72ade5b14 100755
--- a/prototypes/bifrost/scripts/destroy-env.sh
+++ b/prototypes/bifrost/scripts/destroy-env.sh
@@ -37,8 +37,8 @@ rm -rf /var/log/libvirt/baremetal_logs/*.log
CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
if [ $CLEAN_DIB_IMAGES = "true" ]; then
- rm -rf /httpboot/*
- rm -rf /tftpboot/*
+ rm -rf /httpboot
+ rm -rf /tftpboot
fi
# remove VM disk images
diff --git a/utils/test/dashboard.tar.gz b/utils/test/dashboard.tar.gz
new file mode 100644
index 000000000..ef85f90da
--- /dev/null
+++ b/utils/test/dashboard.tar.gz
Binary files differ
diff --git a/utils/test/scripts/backup-db.sh b/utils/test/dashboard/backup-db.sh
index 35c3fbe5a..35c3fbe5a 100644
--- a/utils/test/scripts/backup-db.sh
+++ b/utils/test/dashboard/backup-db.sh
diff --git a/utils/test/dashboard/dashboard/__init__.py b/utils/test/dashboard/dashboard/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/dashboard/dashboard/__init__.py
diff --git a/utils/test/dashboard/dashboard/common/__init__.py b/utils/test/dashboard/dashboard/common/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/dashboard/dashboard/common/__init__.py
diff --git a/utils/test/scripts/shared_utils.py b/utils/test/dashboard/dashboard/common/elastic_access.py
index e90a17fa3..e90a17fa3 100644
--- a/utils/test/scripts/shared_utils.py
+++ b/utils/test/dashboard/dashboard/common/elastic_access.py
diff --git a/utils/test/scripts/logger_utils.py b/utils/test/dashboard/dashboard/common/logger_utils.py
index 25d28a582..183080810 100644
--- a/utils/test/scripts/logger_utils.py
+++ b/utils/test/dashboard/dashboard/common/logger_utils.py
@@ -57,9 +57,9 @@ class Logger(object):
return self.logger
-class KibanaDashboardLogger(Logger):
+class DashboardLogger(Logger):
file_path = '/var/log/kibana_dashboard'
def __init__(self, logger_name):
- super(KibanaDashboardLogger, self).__init__(logger_name)
+ super(DashboardLogger, self).__init__(logger_name)
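The renamed class is consumed exactly as before; a minimal usage sketch, mirroring the import style and the .get accessor used by elastic2kibana/main.py and mongo2elastic/main.py later in this patch (the log path is the file_path shown above):

    # minimal sketch -- .get returns the underlying logging.Logger ("return self.logger" above)
    from common import logger_utils

    logger = logger_utils.DashboardLogger('mongo2elastic').get
    logger.info("messages end up in /var/log/kibana_dashboard")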
diff --git a/utils/test/dashboard/dashboard/conf/__init__.py b/utils/test/dashboard/dashboard/conf/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/dashboard/dashboard/conf/__init__.py
diff --git a/utils/test/scripts/config.py b/utils/test/dashboard/dashboard/conf/config.py
index 2d447a7ba..2e0f1cabb 100644
--- a/utils/test/scripts/config.py
+++ b/utils/test/dashboard/dashboard/conf/config.py
@@ -22,7 +22,7 @@ class APIConfig:
"""
def __init__(self):
- self._default_config_location = "./config.ini"
+ self._default_config_location = "../etc/config.ini"
self.elastic_url = 'http://localhost:9200'
self.elastic_creds = None
self.destination = 'elasticsearch'
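A hedged sketch of loading the relocated configuration; the attribute names are the defaults visible above and the CONF.* reads in elastic2kibana/main.py below, and the explicit path is the new default location:

    from conf.config import APIConfig

    CONF = APIConfig().parse("../etc/config.ini")   # new default, relative to the dashboard/ package
    print(CONF.elastic_url)    # 'http://localhost:9200' unless overridden in etc/config.ini
    print(CONF.destination)    # 'elasticsearch'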
diff --git a/utils/test/scripts/testcases_parser.py b/utils/test/dashboard/dashboard/conf/testcases.py
index cf9599858..e120987dd 100644
--- a/utils/test/scripts/testcases_parser.py
+++ b/utils/test/dashboard/dashboard/conf/testcases.py
@@ -1,7 +1,7 @@
import yaml
-with open('./testcases.yaml') as f:
+with open('./functest/testcases.yaml') as f:
testcases_yaml = yaml.safe_load(f)
f.close()
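Consumers iterate the parsed YAML as before; note that the relative './functest/testcases.yaml' path implies the module is imported from the dashboard/ package directory. A short sketch of the pattern used in elastic2kibana/main.py and mongo2elastic/main.py below (printed values are illustrative):

    from conf import testcases

    for project, case_dicts in testcases.testcases_yaml.items():
        for case_dict in case_dicts:
            print(project, case_dict.get('name'))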
diff --git a/utils/test/dashboard/dashboard/elastic2kibana/__init__.py b/utils/test/dashboard/dashboard/elastic2kibana/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/dashboard/dashboard/elastic2kibana/__init__.py
diff --git a/utils/test/scripts/create_kibana_dashboards.py b/utils/test/dashboard/dashboard/elastic2kibana/main.py
index 19d5b5e52..c1cbc308e 100644
--- a/utils/test/scripts/create_kibana_dashboards.py
+++ b/utils/test/dashboard/dashboard/elastic2kibana/main.py
@@ -4,12 +4,11 @@ import urlparse
import argparse
-import logger_utils
-import shared_utils
-import testcases_parser
-from config import APIConfig
+from common import logger_utils, elastic_access
+from conf import testcases
+from conf.config import APIConfig
-logger = logger_utils.KibanaDashboardLogger('elastic2kibana').get
+logger = logger_utils.DashboardLogger('elastic2kibana').get
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config-file",
@@ -18,6 +17,11 @@ parser.add_argument("-c", "--config-file",
args = parser.parse_args()
CONF = APIConfig().parse(args.config_file)
+base_elastic_url = CONF.elastic_url
+generate_inputs = CONF.is_js
+input_file_path = CONF.js_path
+kibana_url = CONF.kibana_url
+es_creds = CONF.elastic_creds
_installers = {'fuel', 'apex', 'compass', 'joid'}
@@ -53,7 +57,7 @@ class KibanaDashboard(dict):
for visualization in self._kibana_visualizations:
url = urlparse.urljoin(base_elastic_url, '/.kibana/visualization/{}'.format(visualization.id))
logger.debug("publishing visualization '{}'".format(url))
- shared_utils.publish_json(visualization, es_creds, url)
+ elastic_access.publish_json(visualization, es_creds, url)
def _construct_panels(self):
size_x = 6
@@ -131,7 +135,7 @@ class KibanaDashboard(dict):
def _publish(self):
url = urlparse.urljoin(base_elastic_url, '/.kibana/dashboard/{}'.format(self.id))
logger.debug("publishing dashboard '{}'".format(url))
- shared_utils.publish_json(self, es_creds, url)
+ elastic_access.publish_json(self, es_creds, url)
def publish(self):
self._publish_visualizations()
@@ -282,8 +286,8 @@ def _get_pods_and_scenarios(project_name, case_name, installer):
}
})
- elastic_data = shared_utils.get_elastic_docs(urlparse.urljoin(base_elastic_url, '/test_results/mongo2elastic'),
- es_creds, query_json)
+ elastic_data = elastic_access.get_elastic_docs(urlparse.urljoin(base_elastic_url, '/test_results/mongo2elastic'),
+ es_creds, query_json)
pods_and_scenarios = {}
@@ -312,7 +316,7 @@ def construct_dashboards():
:return: list of KibanaDashboards
"""
kibana_dashboards = []
- for project, case_dicts in testcases_parser.testcases_yaml.items():
+ for project, case_dicts in testcases.testcases_yaml.items():
for case in case_dicts:
case_name = case.get('name')
visualizations = case.get('visualizations')
@@ -359,13 +363,7 @@ def generate_js_inputs(js_file_path, kibana_url, dashboards):
js_file_fdesc.write(str(js_dict).replace("u'", "'"))
-if __name__ == '__main__':
- base_elastic_url = CONF.elastic_url
- generate_inputs = CONF.is_js
- input_file_path = CONF.js_path
- kibana_url = CONF.kibana_url
- es_creds = CONF.elastic_creds
-
+def main():
dashboards = construct_dashboards()
for kibana_dashboard in dashboards:
diff --git a/utils/test/dashboard/dashboard/elastic2kibana_main.py b/utils/test/dashboard/dashboard/elastic2kibana_main.py
new file mode 100644
index 000000000..3ec27cb40
--- /dev/null
+++ b/utils/test/dashboard/dashboard/elastic2kibana_main.py
@@ -0,0 +1,4 @@
+from elastic2kibana.main import main
+
+if __name__ == '__main__':
+ main()
diff --git a/utils/test/dashboard/dashboard/functest/__init__.py b/utils/test/dashboard/dashboard/functest/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/dashboard/dashboard/functest/__init__.py
diff --git a/utils/test/scripts/testcases.yaml b/utils/test/dashboard/dashboard/functest/testcases.yaml
index 9c33d2e6b..9c33d2e6b 100644
--- a/utils/test/scripts/testcases.yaml
+++ b/utils/test/dashboard/dashboard/functest/testcases.yaml
diff --git a/utils/test/dashboard/dashboard/mongo2elastic/__init__.py b/utils/test/dashboard/dashboard/mongo2elastic/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/dashboard/dashboard/mongo2elastic/__init__.py
diff --git a/utils/test/scripts/mongo2elastic_format.py b/utils/test/dashboard/dashboard/mongo2elastic/format.py
index ef485bae0..ef485bae0 100644
--- a/utils/test/scripts/mongo2elastic_format.py
+++ b/utils/test/dashboard/dashboard/mongo2elastic/format.py
diff --git a/utils/test/scripts/mongo_to_elasticsearch.py b/utils/test/dashboard/dashboard/mongo2elastic/main.py
index 777eda6ad..25b5320d7 100644
--- a/utils/test/scripts/mongo_to_elasticsearch.py
+++ b/utils/test/dashboard/dashboard/mongo2elastic/main.py
@@ -10,13 +10,12 @@ import uuid
import argparse
-import logger_utils
-import mongo2elastic_format
-import shared_utils
-import testcases_parser
-from config import APIConfig
+from common import logger_utils, elastic_access
+from conf import testcases
+from conf.config import APIConfig
+from mongo2elastic import format
-logger = logger_utils.KibanaDashboardLogger('mongo2elastic').get
+logger = logger_utils.DashboardLogger('mongo2elastic').get
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config-file",
@@ -50,7 +49,7 @@ class DocumentPublisher:
def format(self):
try:
if self._verify_document() and self.fmt:
- self.is_formatted = vars(mongo2elastic_format)[self.fmt](self.doc)
+ self.is_formatted = vars(format)[self.fmt](self.doc)
else:
self.is_formatted = False
except Exception:
@@ -65,7 +64,7 @@ class DocumentPublisher:
self._publish()
def _publish(self):
- status, data = shared_utils.publish_json(self.doc, self.creds, self.to)
+ status, data = elastic_access.publish_json(self.doc, self.creds, self.to)
if status > 300:
logger.error('Publish record[{}] failed, due to [{}]'
.format(self.doc, json.loads(data)['error']['reason']))
@@ -201,7 +200,7 @@ class DocumentsPublisher:
exit(-1)
def get_existed_docs(self):
- self.existed_docs = shared_utils.get_elastic_docs_by_days(self.elastic_url, self.creds, self.days)
+ self.existed_docs = elastic_access.get_elastic_docs_by_days(self.elastic_url, self.creds, self.days)
return self
def publish(self):
@@ -231,10 +230,10 @@ def main():
if to == 'elasticsearch':
to = base_elastic_url
- for project, case_dicts in testcases_parser.testcases_yaml.items():
+ for project, case_dicts in testcases.testcases_yaml.items():
for case_dict in case_dicts:
case = case_dict.get('name')
- fmt = testcases_parser.compose_format(case_dict.get('format'))
+ fmt = testcases.compose_format(case_dict.get('format'))
DocumentsPublisher(project,
case,
fmt,
@@ -242,7 +241,3 @@ def main():
base_elastic_url,
es_creds,
to).export().get_existed_docs().publish()
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
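A hedged sketch of the dispatch pattern in DocumentPublisher.format() above: the formatter name composed from testcases.yaml is looked up in the renamed mongo2elastic.format module and applied to the mongo document (fmt_name below is hypothetical):

    from mongo2elastic import format as fmt_module

    def apply_format(doc, fmt_name):
        # equivalent of: self.is_formatted = vars(format)[self.fmt](self.doc)
        formatter = vars(fmt_module).get(fmt_name)
        return formatter(doc) if formatter else False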
diff --git a/utils/test/dashboard/dashboard/mongo2elastic_main.py b/utils/test/dashboard/dashboard/mongo2elastic_main.py
new file mode 100644
index 000000000..141d8f3ab
--- /dev/null
+++ b/utils/test/dashboard/dashboard/mongo2elastic_main.py
@@ -0,0 +1,4 @@
+from mongo2elastic.main import main
+
+if __name__ == '__main__':
+ main()
diff --git a/utils/test/scripts/config.ini b/utils/test/dashboard/etc/config.ini
index 63d283dc8..b94ac7b4f 100644
--- a/utils/test/scripts/config.ini
+++ b/utils/test/dashboard/etc/config.ini
@@ -9,6 +9,6 @@ creds =
destination = elasticsearch
[kibana]
-url = http://10.63.243.17/kibana/app/kibana
+url = http://10.63.243.17:5601/app/kibana
js = true
js_path = /usr/share/nginx/html/kibana_dashboards/conf.js
diff --git a/utils/test/scripts/kibana_cleanup.py b/utils/test/dashboard/kibana_cleanup.py
index d87d9a285..9ce4994f5 100644
--- a/utils/test/scripts/kibana_cleanup.py
+++ b/utils/test/dashboard/kibana_cleanup.py
@@ -4,7 +4,7 @@ import urlparse
import argparse
-import shared_utils
+from dashboard.common import elastic_access
logger = logging.getLogger('clear_kibana')
logger.setLevel(logging.DEBUG)
@@ -14,10 +14,10 @@ logger.addHandler(file_handler)
def delete_all(url, es_creds):
- ids = shared_utils.get_elastic_docs(url, es_creds, body=None, field='_id')
+ ids = elastic_access.get_elastic_docs(url, es_creds, body=None, field='_id')
for id in ids:
del_url = '/'.join([url, id])
- shared_utils.delete_request(del_url, es_creds)
+ elastic_access.delete_request(del_url, es_creds)
if __name__ == '__main__':
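A hedged sketch of driving delete_all(); the command-line interface of kibana_cleanup.py is not part of this diff, and the /.kibana object paths are the ones published by elastic2kibana/main.py above:

    es_creds = None   # no credentials, as in the default etc/config.ini
    for doc_type in ('visualization', 'dashboard'):
        delete_all('http://localhost:9200/.kibana/' + doc_type, es_creds)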
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index ef567f17d..90699bd61 100755
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -24,6 +24,7 @@ logger = utils.getLogger("Status")
# Initialization
testValid = []
otherTestCases = []
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
# init just tempest to get the list of scenarios
# as all the scenarios run Tempest
@@ -82,18 +83,26 @@ for version in conf.versions:
# For all the scenarios get results
for s, s_result in scenario_results.items():
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s:" %
+ (installer, version, s))
+ logger.debug("Scenario results: %s" % s_result)
+
# Green or Red light for a given scenario
nb_test_runnable_for_this_scenario = 0
scenario_score = 0
-
+ # url of the last jenkins log corresponding to a given
+ # scenario
+ s_url = ""
+ if len(s_result) > 0:
+ build_tag = s_result[len(s_result)-1]['build_tag']
+ logger.debug("Build tag: %s" % build_tag)
+ s_url = s_url = utils.getJenkinsUrl(build_tag)
+ logger.info("last jenkins url: %s" % s_url)
testCases2BeDisplayed = []
# Check if test case is runnable / installer, scenario
# for the test case used for Scenario validation
try:
- logger.info("---------------------------------")
- logger.info("installer %s, version %s, scenario %s:" %
- (installer, version, s))
-
# 1) Manage the test cases for the scenario validation
# concretely Tiers 0-3
for test_case in testValid:
@@ -185,7 +194,8 @@ for version in conf.versions:
else:
logger.info(">>>>> scenario OK, save the information")
s_status = "OK"
- path_validation_file = (conf.REPORTING_PATH + "/release/" + version +
+ path_validation_file = (conf.REPORTING_PATH +
+ "/release/" + version +
"/validated_scenario_history.txt")
with open(path_validation_file, "a") as f:
time_format = "%Y-%m-%d %H:%M"
@@ -193,8 +203,20 @@ for version in conf.versions:
";" + installer + ";" + s + "\n")
f.write(info)
- scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score,
- s_score_percent)
+ # Save daily results in a file
+ path_validation_file = (conf.REPORTING_PATH +
+ "/release/" + version +
+ "/scenario_history.txt")
+ with open(path_validation_file, "a") as f:
+ info = (reportingDate + "," + s + "," + installer +
+ "," + s_score + "," +
+ str(round(s_score_percent)) + "\n")
+ f.write(info)
+
+ scenario_result_criteria[s] = sr.ScenarioResult(s_status,
+ s_score,
+ s_score_percent,
+ s_url)
logger.info("--------------------------")
templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
@@ -209,7 +231,8 @@ for version in conf.versions:
items=items,
installer=installer,
period=conf.PERIOD,
- version=version)
+ version=version,
+ date=reportingDate)
with open(conf.REPORTING_PATH + "/release/" + version +
"/index-status-" + installer + ".html", "wb") as fh:
diff --git a/utils/test/reporting/functest/reportingConf.py b/utils/test/reporting/functest/reportingConf.py
index b0e4cf7a1..e1c4b61a8 100644
--- a/utils/test/reporting/functest/reportingConf.py
+++ b/utils/test/reporting/functest/reportingConf.py
@@ -12,10 +12,10 @@
installers = ["apex", "compass", "fuel", "joid"]
# list of test cases declared in testcases.yaml but that must not be
# taken into account for the scoring
-blacklist = ["ovno", "security_scan", 'odl-sfc']
+blacklist = ["ovno", "security_scan"]
# versions = ["brahmaputra", "master"]
versions = ["master", "colorado"]
-PERIOD = 50
+PERIOD = 10
MAX_SCENARIO_CRITERIA = 50
# get the last 5 test results to determinate the success criteria
NB_TESTS = 5
diff --git a/utils/test/reporting/functest/reportingUtils.py b/utils/test/reporting/functest/reportingUtils.py
index f02620430..9ba02e821 100644
--- a/utils/test/reporting/functest/reportingUtils.py
+++ b/utils/test/reporting/functest/reportingUtils.py
@@ -139,7 +139,7 @@ def getResult(testCase, installer, scenario, version):
# print "Nb test OK (last 10 days):"+ str(nbTestOk)
# check that we have at least 4 runs
if len(scenario_results) < 1:
- # No results available
+ # No results available
test_result_indicator = -1
elif nbTestOk < 1:
test_result_indicator = 0
@@ -158,3 +158,21 @@ def getResult(testCase, installer, scenario, version):
else:
test_result_indicator = 2
return test_result_indicator
+
+
+def getJenkinsUrl(build_tag):
+ # e.g. jenkins-functest-apex-apex-daily-colorado-daily-colorado-246
+ # id = 246
+ # note it is linked to jenkins format
+ # if this format changes...function to be adapted....
+ url_base = "https://build.opnfv.org/ci/view/functest/job/"
+ jenkins_url = ""
+ try:
+ build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
+ jenkins_path = filter(lambda c: not c.isdigit(), build_tag)
+ url_id = jenkins_path[8:-1] + "/" + str(build_id[0])
+ jenkins_url = url_base + url_id + "/console"
+ except:
+ print 'Impossible to get jenkins url:'
+
+ return jenkins_url
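A worked example of the parsing above, using the tag quoted in the function's own comment (note that under Python 2, filter() on a string returns a string):

    build_tag = "jenkins-functest-apex-apex-daily-colorado-daily-colorado-246"
    build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]   # [246]
    jenkins_path = filter(lambda c: not c.isdigit(), build_tag)
    # jenkins_path == "jenkins-functest-apex-apex-daily-colorado-daily-colorado-"
    url_id = jenkins_path[8:-1] + "/" + str(build_id[0])
    # url_id == "functest-apex-apex-daily-colorado-daily-colorado/246"
    # jenkins_url == "https://build.opnfv.org/ci/view/functest/job/" + url_id + "/console"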
diff --git a/utils/test/reporting/functest/scenarioResult.py b/utils/test/reporting/functest/scenarioResult.py
index c6c337330..5a54eed96 100644
--- a/utils/test/reporting/functest/scenarioResult.py
+++ b/utils/test/reporting/functest/scenarioResult.py
@@ -10,10 +10,11 @@
class ScenarioResult(object):
- def __init__(self, status, score=0, score_percent=0):
+ def __init__(self, status, score=0, score_percent=0, url_lastrun=''):
self.status = status
self.score = score
self.score_percent = score_percent
+ self.url_lastrun = url_lastrun
def getStatus(self):
return self.status
@@ -22,4 +23,7 @@ class ScenarioResult(object):
return self.score
def getScorePercent(self):
- return self.score_percent
\ No newline at end of file
+ return self.score_percent
+
+ def getUrlLastRun(self):
+ return self.url_lastrun
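A short usage sketch matching the four-argument call now made in reporting-status.py (the URL is illustrative, built like the getJenkinsUrl example above):

    result = ScenarioResult("OK", score="27/32", score_percent=84.4,
                            url_lastrun="https://build.opnfv.org/ci/view/functest/job/"
                                        "functest-apex-apex-daily-colorado-daily-colorado/246/console")
    result.getUrlLastRun()   # rendered as the scenario link by index-status-tmpl.html below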
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 96240de5b..67c23491a 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -18,7 +18,7 @@
<body>
<div class="container">
<div class="masthead">
- <h3 class="text-muted">Functest status page ({{version}})</h3>
+ <h3 class="text-muted">Functest status page ({{version}}, {{date}})</h3>
<nav>
<ul class="nav nav-justified">
<li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
@@ -47,7 +47,7 @@
</tr>
{% for scenario,iteration in scenario_stats.iteritems() -%}
<tr class="tr-ok">
- <td>{{scenario}}</td>
+ <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
<td>{%if scenario_results[scenario].getScorePercent() < 8.3 -%}
<img src="../../img/gauge_0.png">
{%elif scenario_results[scenario].getScorePercent() < 16.7 -%}
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py
deleted file mode 100644
index 05c0c9392..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Orange
-# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py
deleted file mode 100755
index f5e3d9a6e..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#!/usr/bin/python
-#
-##############################################################################
-# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# This script is used to build dashboard ready json results
-# It may be used for all the test case of the Bottlenecks project
-# a new method format_<Test_case>_for_dashboard(results)
-# v0.1: basic example with methods for Rubbos.
-#
-import os
-import requests
-import json
-
-
-def get_bottlenecks_cases():
- """
- get the list of the supported test cases
- TODO: update the list when adding a new test case for the dashboard
- """
- return ["rubbos", "tu1", "tu3"]
-
-
-def check_bottlenecks_case_exist(case):
- """
- check if the testcase exists
- if the test case is not defined or not declared in the list
- return False
- """
- bottlenecks_cases = get_bottlenecks_cases()
-
- if case is None or case not in bottlenecks_cases:
- return False
- else:
- return True
-
-
-def format_bottlenecks_for_dashboard(case, results):
- """
- generic method calling the method corresponding to the test case
- check that the testcase is properly declared first
- then build the call to the specific method
- """
- if check_bottlenecks_case_exist(case):
- cmd = "format_" + case + "_for_dashboard"
- res = globals()[cmd](results)
- else:
- res = []
- print "Test cases not declared"
- return res
-
-
-def format_rubbos_for_dashboard(results):
- """
- Post processing for the Rubbos test case
- """
- test_data = [{'description': 'Rubbos results'}]
-
- # Graph 1:Rubbos maximal throughput
- # ********************************
- #new_element = []
- #for each_result in results:
- # throughput_data = [record['throughput'] for record in each_result['details']]
- # new_element.append({'x': each_result['start_date'],
- # 'y': max(throughput_data)})
-
- #test_data.append({'name': "Rubbos max throughput",
- # 'info': {'type': "graph",
- # 'xlabel': 'time',
- # 'ylabel': 'maximal throughput'},
- # 'data_set': new_element})
-
- # Graph 2: Rubbos last record
- # ********************************
- new_element = []
- latest_result = results[-1]["details"]
- for data in latest_result:
- client_num = int(data["client"])
- throughput = int(data["throughput"])
- new_element.append({'x': client_num,
- 'y': throughput})
- test_data.append({'name': "Rubbos throughput vs client number",
- 'info': {'type': "graph",
- 'xlabel': 'client number',
- 'ylabel': 'throughput'},
- 'data_set': new_element})
-
- return test_data
-
-
-def format_tu1_for_dashboard(results):
- test_data = [{'description': 'Tu-1 performance result'}]
- line_element = []
- bar_element = {}
- last_result = results[-1]["details"]
- for key in sorted(last_result):
- bandwith = last_result[key]["Bandwidth"]
- pktsize = int(key)
- line_element.append({'x': pktsize,
- 'y': bandwith * 1000})
- bar_element[key] = bandwith * 1000
- # graph1, line
- test_data.append({'name': "VM2VM max single directional throughput",
- 'info': {'type': "graph",
- 'xlabel': 'pktsize',
- 'ylabel': 'bandwith(kpps)'},
- 'data_set': line_element})
- # graph2, bar
- test_data.append({'name': "VM2VM max single directional throughput",
- 'info': {"type": "bar"},
- 'data_set': bar_element})
- return test_data
-
-
-def format_tu3_for_dashboard(results):
- test_data = [{'description': 'Tu-3 performance result'}]
- new_element = []
- bar_element = {}
- last_result = results[-1]["details"]
- for key in sorted(last_result):
- bandwith = last_result[key]["Bandwidth"]
- pktsize = int(key)
- new_element.append({'x': pktsize,
- 'y': bandwith * 1000})
- bar_element[key] = bandwith * 1000
- # graph1, line
- test_data.append({'name': "VM2VM max bidirectional throughput",
- 'info': {'type': "graph",
- 'xlabel': 'pktsize',
- 'ylabel': 'bandwith(kpps)'},
- 'data_set': new_element})
- # graph2, bar
- test_data.append({'name': "VM2VM max single directional throughput",
- 'info': {"type": "bar"},
- 'data_set': bar_element})
- return test_data
-
-
-############################ For local test ################################
-
-def _read_sample_output(filename):
- curr_path = os.path.dirname(os.path.abspath(__file__))
- output = os.path.join(curr_path, filename)
- with open(output) as f:
- sample_output = f.read()
-
- result = json.loads(sample_output)
- return result
-
-
-# Copy form functest/testcases/Dashboard/dashboard_utils.py
-# and did some minor modification for local test.
-def _get_results(db_url, test_criteria):
- test_project = test_criteria["project"]
- testcase = test_criteria["testcase"]
-
- # Build headers
- headers = {'Content-Type': 'application/json'}
-
- # build the request
- # if criteria is all => remove criteria
- url = db_url + "/results?project=" + test_project + "&case=" + testcase
-
- # Send Request to Test DB
- myData = requests.get(url, headers=headers)
-
- # Get result as a json object
- myNewData = json.loads(myData.text)
-
- # Get results
- myDataResults = myNewData['test_results']
- return myDataResults
-
-#only for local test
-def _test():
- db_url = "http://testresults.opnfv.org/testapi"
- results = _get_results(db_url, {"project": "bottlenecks", "testcase": "rubbos"})
- test_result = format_rubbos_for_dashboard(results)
- print json.dumps(test_result, indent=4)
-
- results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu1"})
- #results = _read_sample_output("sample")
- #print json.dumps(results, indent=4)
- test_result = format_tu1_for_dashboard(results)
- print json.dumps(test_result, indent=4)
- results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu3"})
- test_result = format_tu3_for_dashboard(results)
- print json.dumps(test_result, indent=4)
-
-
-if __name__ == '__main__':
- _test()
-
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py
deleted file mode 100644
index 42c635846..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 Orange
-# morgan.richomme@orange.com
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# This script is used to retieve data from test DB
-# and format them into a json format adapted for a dashboard
-#
-# v0.1: basic example
-#
-import re
-import sys
-from functest2Dashboard import format_functest_for_dashboard, \
- check_functest_case_exist
-from yardstick2Dashboard import format_yardstick_for_dashboard, \
- check_yardstick_case_exist
-from vsperf2Dashboard import format_vsperf_for_dashboard, \
- check_vsperf_case_exist
-from bottlenecks2Dashboard import format_bottlenecks_for_dashboard, \
- check_bottlenecks_case_exist
-from qtip2Dashboard import format_qtip_for_dashboard, \
- check_qtip_case_exist
-from promise2Dashboard import format_promise_for_dashboard, \
- check_promise_case_exist
-from doctor2Dashboard import format_doctor_for_dashboard, \
- check_doctor_case_exist
-
-# any project test project wishing to provide dashboard ready values
-# must include at least 2 methods
-# - format_<Project>_for_dashboard
-# - check_<Project>_case_exist
-
-
-def check_dashboard_ready_project(test_project):
- # Check that the first param corresponds to a project
- # for whoch dashboard processing is available
- # print("test_project: %s" % test_project)
- project_module = 'opnfv_testapi.dashboard.'+test_project + '2Dashboard'
- return True if project_module in sys.modules else False
-
-
-def check_dashboard_ready_case(project, case):
- cmd = "check_" + project + "_case_exist"
- return globals()[cmd](case)
-
-
-def get_dashboard_projects():
- # Retrieve all the projects that could provide
- # Dashboard ready graphs
- # look in the releng repo
- # search all the project2Dashboard.py files
- # we assume that dashboard processing of project <Project>
- # is performed in the <Project>2Dashboard.py file
- projects = []
- cp = re.compile('opnfv_testapi\.dashboard\.(.+?)2Dashboard')
- for module in sys.modules:
- project = re.findall(cp, module)
- if project:
- projects.extend(project)
- return projects
-
-
-def get_dashboard_result(project, case, results=None):
- # get the dashboard ready results
- # paramters are:
- # project: project name
- # results: array of raw results pre-filterded
- # according to the parameters of the request
- cmd = "format_" + project + "_for_dashboard"
- return globals()[cmd](case, results)
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py
deleted file mode 100644
index 5b1f190a9..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py
+++ /dev/null
@@ -1,105 +0,0 @@
- #!/usr/bin/python
-#
-# Copyright (c) 2015 Orange
-# morgan.richomme@orange.com
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# This script is used to build dashboard ready json results
-# It may be used for all the test case of the Doctor project
-# a new method format_<Test_case>_for_dashboard(results)
-#
-import re
-import datetime
-
-
-def get_doctor_cases():
- """
- get the list of the supported test cases
- TODO: update the list when adding a new test case for the dashboard
- """
- return ["doctor-notification","doctor-mark-down"]
-
-
-def format_doctor_for_dashboard(case, results):
- """
- generic method calling the method corresponding to the test case
- check that the testcase is properly declared first
- then build the call to the specific method
- """
-
- if check_doctor_case_exist(case):
- # note we add _case because testcase and project had the same name
- # TODO refactoring...looks fine at the beginning wit only 1 project
- # not very ugly now and clearly not optimized...
- cmd = "format_" + case.replace('-','_') + "_case_for_dashboard"
- res = globals()[cmd](results)
- else:
- res = []
- return res
-
-
-def check_doctor_case_exist(case):
- """
- check if the testcase exists
- if the test case is not defined or not declared in the list
- return False
- """
- doctor_cases = get_doctor_cases()
-
- if (case is None or case not in doctor_cases):
- return False
- else:
- return True
-
-
-def format_doctor_mark_down_case_for_dashboard(results):
- """
- Post processing for the doctor test case
- """
- test_data = [{'description': 'doctor-mark-down results for Dashboard'}]
- return test_data
-
-
-def format_doctor_notification_case_for_dashboard(results):
- """
- Post processing for the doctor-notification test case
- """
- test_data = [{'description': 'doctor results for Dashboard'}]
- # Graph 1: (duration)=f(time)
- # ***************************************
- new_element = []
-
- # default duration 0:00:08.999904
- # consider only seconds => 09
- for data in results:
- t = data['details']['duration']
- new_element.append({'x': data['start_date'],
- 'y': t})
-
- test_data.append({'name': "doctor-notification duration ",
- 'info': {'type': "graph",
- 'xlabel': 'time (s)',
- 'ylabel': 'duration (s)'},
- 'data_set': new_element})
-
- # Graph 2: bar
- # ************
- nbTest = 0
- nbTestOk = 0
-
- for data in results:
- nbTest += 1
- if data['details']['status'] == "OK":
- nbTestOk += 1
-
- test_data.append({'name': "doctor-notification status",
- 'info': {"type": "bar"},
- 'data_set': [{'Nb tests': nbTest,
- 'Nb Success': nbTestOk}]})
-
- return test_data
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py
deleted file mode 100644
index 01697f73b..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py
+++ /dev/null
@@ -1,472 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 Orange
-# morgan.richomme@orange.com
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# This script is used to build dashboard ready json results
-# It may be used for all the test case of the Functest project
-# a new method format_<Test_case>_for_dashboard(results)
-# v0.1: basic example with methods for odl, Tempest, Rally and vPing
-#
-import datetime
-import re
-
-
-def get_functest_cases():
- """
- get the list of the supported test cases
- TODO: update the list when adding a new test case for the dashboard
- """
- return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL",
- "ONOS", "Rally"]
-
-
-def format_functest_for_dashboard(case, results):
- """
- generic method calling the method corresponding to the test case
- check that the testcase is properly declared first
- then build the call to the specific method
- """
- if check_functest_case_exist(case):
- cmd = "format_" + case + "_for_dashboard"
- res = globals()[cmd](results)
- else:
- res = []
- print "Test cases not declared"
- return res
-
-
-def check_functest_case_exist(case):
- """
- check if the testcase exists
- if the test case is not defined or not declared in the list
- return False
- """
- functest_cases = get_functest_cases()
-
- if (case is None or case not in functest_cases):
- return False
- else:
- return True
-
-
-def format_status_for_dashboard(results):
- test_data = [{'description': 'Functest status'}]
-
- # define magic equation for the status....
- # 5 suites: vPing, odl, Tempest, vIMS, Rally
- # Which overall KPI make sense...
-
- # TODO to be done and discussed
- testcases = get_functest_cases()
- test_data.append({'nb test suite(s) run': len(testcases)-1})
- test_data.append({'vPing': '100%'})
- test_data.append({'VIM status': '82%'})
- test_data.append({'SDN Controllers': {'odl': '92%',
- 'onos': '95%',
- 'ocl': '93%'}})
- test_data.append({'VNF deployment': '95%'})
-
- return test_data
-
-
-def format_vIMS_for_dashboard(results):
- """
- Post processing for the vIMS test case
- """
- test_data = [{'description': 'vIMS results for Dashboard'}]
-
- # Graph 1: (duration_deployment_orchestrator,
- # duration_deployment_vnf,
- # duration_test) = f(time)
- # ********************************
- new_element = []
-
- for data in results:
- new_element.append({'x': data['start_date'],
- 'y1': data['details']['orchestrator']['duration'],
- 'y2': data['details']['vIMS']['duration'],
- 'y3': data['details']['sig_test']['duration']})
-
- test_data.append({'name': "vIMS orchestrator/VNF/test duration",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'orchestation deployment duration',
- 'y2label': 'vIMS deployment duration',
- 'y3label': 'vIMS test duration'},
- 'data_set': new_element})
-
- # Graph 2: (Nb test, nb failure, nb skipped)=f(time)
- # **************************************************
- new_element = []
-
- for data in results:
- # Retrieve all the tests
- nbTests = 0
- nbFailures = 0
- nbSkipped = 0
- vIMS_test = data['details']['sig_test']['result']
-
- for data_test in vIMS_test:
- # Calculate nb of tests run and nb of tests failed
- # vIMS_results = get_vIMSresults(vIMS_test)
- # print vIMS_results
- try:
- if data_test['result'] == "Passed":
- nbTests += 1
- elif data_test['result'] == "Failed":
- nbFailures += 1
- elif data_test['result'] == "Skipped":
- nbSkipped += 1
- except:
- nbTests = 0
-
- new_element.append({'x': data['start_date'],
- 'y1': nbTests,
- 'y2': nbFailures,
- 'y3': nbSkipped})
-
- test_data.append({'name': "vIMS nb tests passed/failed/skipped",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'Number of tests passed',
- 'y2label': 'Number of tests failed',
- 'y3label': 'Number of tests skipped'},
- 'data_set': new_element})
-
- # Graph 3: bar graph Summ(nb tests run), Sum (nb tests failed)
- # ********************************************************
- nbTests = 0
- nbFailures = 0
-
- for data in results:
- vIMS_test = data['details']['sig_test']['result']
-
- for data_test in vIMS_test:
- nbTestsOK = 0
- nbTestsKO = 0
-
- try:
- if data_test['result'] == "Passed":
- nbTestsOK += 1
- elif data_test['result'] == "Failed":
- nbTestsKO += 1
- except:
- nbTestsOK = 0
-
- nbTests += nbTestsOK + nbTestsKO
- nbFailures += nbTestsKO
-
- test_data.append({'name': "Total number of tests run/failure tests",
- 'info': {"type": "bar"},
- 'data_set': [{'Run': nbTests,
- 'Failed': nbFailures}]})
-
- return test_data
-
-
-def format_Tempest_for_dashboard(results):
- """
- Post processing for the Tempest test case
- """
- test_data = [{'description': 'Tempest results for Dashboard'}]
-
- # Graph 1: Test_Duration = f(time)
- # ********************************
- new_element = []
- for data in results:
- new_element.append({'x': data['start_date'],
- 'y': data['details']['duration']})
-
- test_data.append({'name': "Tempest duration",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'ylabel': 'duration (s)'},
- 'data_set': new_element})
-
- # Graph 2: (Nb test, nb failure)=f(time)
- # ***************************************
- new_element = []
- for data in results:
- new_element.append({'x': data['start_date'],
- 'y1': data['details']['tests'],
- 'y2': data['details']['failures']})
-
- test_data.append({'name': "Tempest nb tests/nb failures",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'Number of tests',
- 'y2label': 'Number of failures'},
- 'data_set': new_element})
-
- # Graph 3: bar graph Summ(nb tests run), Sum (nb tests failed)
- # ********************************************************
- nbTests = 0
- nbFailures = 0
-
- for data in results:
- nbTests += data['details']['tests']
- nbFailures += data['details']['failures']
-
- test_data.append({'name': "Total number of tests run/failure tests",
- 'info': {"type": "bar"},
- 'data_set': [{'Run': nbTests,
- 'Failed': nbFailures}]})
-
- # Graph 4: (Success rate)=f(time)
- # ***************************************
- new_element = []
- for data in results:
- try:
- diff = (int(data['details']['tests']) - int(data['details']['failures']))
- success_rate = 100*diff/int(data['details']['tests'])
- except:
- success_rate = 0
-
- new_element.append({'x': data['start_date'],
- 'y1': success_rate})
-
- test_data.append({'name': "Tempest success rate",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'Success rate'},
- 'data_set': new_element})
-
- return test_data
-
-
-def format_ODL_for_dashboard(results):
- """
- Post processing for the ODL test case
- """
- test_data = [{'description': 'ODL results for Dashboard'}]
-
- # Graph 1: (Nb test, nb failure)=f(time)
- # ***************************************
- new_element = []
-
- for data in results:
- odl_results = data['details']['details']
- nbFailures = 0
- for odl in odl_results:
- if (odl['test_status']['@status'] == "FAIL"):
- nbFailures += 1
- new_element.append({'x': data['start_date'],
- 'y1': len(odl_results),
- 'y2': nbFailures})
-
- test_data.append({'name': "ODL nb tests/nb failures",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'Number of tests',
- 'y2label': 'Number of failures'},
- 'data_set': new_element})
- return test_data
-
-
-def format_ONOS_for_dashboard(results):
- """
- Post processing for the odl test case
- """
- test_data = [{'description': 'ONOS results for Dashboard'}]
- # Graph 1: (duration FUNCvirtNet)=f(time)
- # ***************************************
- new_element = []
-
- # default duration 0:00:08.999904
- # consider only seconds => 09
- for data in results:
- t = data['details']['FUNCvirNet']['duration']
- h, m, s = re.split(':', t)
- s = round(float(s))
- new_duration = int(datetime.timedelta(hours=int(h),
- minutes=int(m),
- seconds=int(s)).total_seconds())
- new_element.append({'x': data['start_date'],
- 'y': new_duration})
-
- test_data.append({'name': "ONOS FUNCvirNet duration ",
- 'info': {'type': "graph",
- 'xlabel': 'time (s)',
- 'ylabel': 'duration (s)'},
- 'data_set': new_element})
-
- # Graph 2: (Nb test, nb failure)FuncvirtNet=f(time)
- # ***************************************
- new_element = []
-
- for data in results:
- onos_results = data['details']['FUNCvirNet']['status']
- nbFailures = 0
- for onos in onos_results:
- if (onos['Case result'] == "FAIL"):
- nbFailures += 1
- new_element.append({'x': data['start_date'],
- 'y1': len(onos_results),
- 'y2': nbFailures})
-
- test_data.append({'name': "ONOS FUNCvirNet nb tests/nb failures",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'Number of tests',
- 'y2label': 'Number of failures'},
- 'data_set': new_element})
-
- # Graph 3: (duration FUNCvirtNetL3)=f(time)
- # ***************************************
- new_element = []
-
- # default duration 0:00:08.999904
- # consider only seconds => 09
- for data in results:
- t = data['details']['FUNCvirNetL3']['duration']
- h, m, s = re.split(':', t)
- s = round(float(s))
- new_duration = int(datetime.timedelta(hours=int(h),
- minutes=int(m),
- seconds=int(s)).total_seconds())
- new_element.append({'x': data['start_date'],
- 'y': new_duration})
-
- test_data.append({'name': "ONOS FUNCvirNetL3 duration",
- 'info': {'type': "graph",
- 'xlabel': 'time (s)',
- 'ylabel': 'duration (s)'},
- 'data_set': new_element})
-
- # Graph 4: (Nb test, nb failure)FuncvirtNetL3=f(time)
- # ***************************************
- new_element = []
-
- for data in results:
- onos_results = data['details']['FUNCvirNetL3']['status']
- nbFailures = 0
- for onos in onos_results:
- if (onos['Case result'] == "FAIL"):
- nbFailures += 1
- new_element.append({'x': data['start_date'],
- 'y1': len(onos_results),
- 'y2': nbFailures})
-
- test_data.append({'name': "ONOS FUNCvirNetL3 nb tests/nb failures",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'Number of tests',
- 'y2label': 'Number of failures'},
- 'data_set': new_element})
- return test_data
-
-
-def format_Rally_for_dashboard(results):
- """
- Post processing for the Rally test case
- """
- test_data = [{'description': 'Rally results for Dashboard'}]
- # Graph 1: Test_Duration = f(time)
- # ********************************
- new_element = []
- for data in results:
- summary_cursor = len(data['details']) - 1
- new_element.append({'x': data['start_date'],
- 'y': int(data['details'][summary_cursor]['summary']['duration'])})
-
- test_data.append({'name': "rally duration",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'ylabel': 'duration (s)'},
- 'data_set': new_element})
-
- # Graph 2: Success rate = f(time)
- # ********************************
- new_element = []
- for data in results:
- new_element.append({'x': data['start_date'],
- 'y': float(data['details'][summary_cursor]['summary']['nb success'])})
-
- test_data.append({'name': "rally success rate",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'ylabel': 'success rate (%)'},
- 'data_set': new_element})
-
- return test_data
-
-
-def format_vPing_for_dashboard(results):
- """
- Post processing for the vPing test case
- """
- test_data = [{'description': 'vPing results for Dashboard'}]
-
- # Graph 1: Test_Duration = f(time)
- # ********************************
- new_element = []
- for data in results:
- new_element.append({'x': data['start_date'],
- 'y': data['details']['duration']})
-
- test_data.append({'name': "vPing duration",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'ylabel': 'duration (s)'},
- 'data_set': new_element})
-
- # Graph 2: bar
- # ************
- nbTest = 0
- nbTestOk = 0
-
- for data in results:
- nbTest += 1
- if data['details']['status'] == "OK":
- nbTestOk += 1
-
- test_data.append({'name': "vPing status",
- 'info': {"type": "bar"},
- 'data_set': [{'Nb tests': nbTest,
- 'Nb Success': nbTestOk}]})
-
- return test_data
-
-
-def format_vPing_userdata_for_dashboard(results):
- """
- Post processing for the vPing_userdata test case
- """
- test_data = [{'description': 'vPing_userdata results for Dashboard'}]
-
- # Graph 1: Test_Duration = f(time)
- # ********************************
- new_element = []
- for data in results:
- new_element.append({'x': data['start_date'],
- 'y': data['details']['duration']})
-
- test_data.append({'name': "vPing_userdata duration",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'ylabel': 'duration (s)'},
- 'data_set': new_element})
-
- # Graph 2: bar
- # ************
- nbTest = 0
- nbTestOk = 0
-
- for data in results:
- nbTest += 1
- if data['details']['status'] == "OK":
- nbTestOk += 1
-
- test_data.append({'name': "vPing_userdata status",
- 'info': {"type": "bar"},
- 'data_set': [{'Nb tests': nbTest,
- 'Nb Success': nbTestOk}]})
-
- return test_data
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py
deleted file mode 100644
index c96341f6d..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py
+++ /dev/null
@@ -1,100 +0,0 @@
- #!/usr/bin/python
-#
-# Copyright (c) 2015 Orange
-# morgan.richomme@orange.com
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# This script is used to build dashboard-ready JSON results
-# It may be used for all the test cases of the Promise project
-# a new method format_<Test_case>_for_dashboard(results) must be added for each new test case
-# v0.1: basic example with methods for odl, Tempest, Rally and vPing
-#
-
-def get_promise_cases():
- """
- get the list of the supported test cases
- TODO: update the list when adding a new test case for the dashboard
- """
- return ["promise"]
-
-
-def format_promise_for_dashboard(case, results):
- """
- generic method calling the method corresponding to the test case
- check that the testcase is properly declared first
- then build the call to the specific method
- """
- if check_promise_case_exist(case):
- # note we add _case because testcase and project had the same name
- # TODO refactoring...looks fine at the beginning with only 1 project
- # not very ugly now and clearly not optimized...
- cmd = "format_" + case + "_case_for_dashboard"
- res = globals()[cmd](results)
- else:
- res = []
- print "Test cases not declared"
- return res
-
-
-def check_promise_case_exist(case):
- """
- check if the testcase exists
- if the test case is not defined or not declared in the list
- return False
- """
- promise_cases = get_promise_cases()
-
- if (case is None or case not in promise_cases):
- return False
- else:
- return True
-
-
-
-
-
-def format_promise_case_for_dashboard(results):
- """
- Post processing for the promise test case
- """
- test_data = [{'description': 'Promise results for Dashboard'}]
- # Graph 1: (duration)=f(time)
- # ***************************************
- new_element = []
-
- # default duration 0:00:08.999904
- # consider only seconds => 09
- for data in results:
- t = data['details']['duration']
- new_element.append({'x': data['creation_date'],
- 'y': t})
-
- test_data.append({'name': "Promise duration ",
- 'info': {'type': "graph",
- 'xlabel': 'time (s)',
- 'ylabel': 'duration (s)'},
- 'data_set': new_element})
-
- # Graph 2: (Nb test, nb failure)=f(time)
- # ***************************************
- new_element = []
-
- for data in results:
- promise_results = data['details']
- new_element.append({'x': data['creation_date'],
- 'y1': promise_results['tests'],
- 'y2': promise_results['failures']})
-
- test_data.append({'name': "Promise nb tests/nb failures",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'Number of tests',
- 'y2label': 'Number of failures'},
- 'data_set': new_element})
-
- return test_data
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py
deleted file mode 100644
index 6ceccd374..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/python
-
-##############################################################################
-# Copyright (c) 2015 Dell Inc and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-def get_qtip_cases():
- """
- get the list of the supported test cases
- TODO: update the list when adding a new test case for the dashboard
- """
- return ["compute_test_suite","storage_test_suite","network_test_suite"]
-
-def check_qtip_case_exist(case):
- """
- check if the testcase exists
- if the test case is not defined or not declared in the list
- return False
- """
- qtip_cases = get_qtip_cases()
- if (case is None or case not in qtip_cases):
- return False
- else:
- return True
-
-def format_qtip_for_dashboard(case, results):
- """
- generic method calling the method corresponding to the test case
- check that the testcase is properly declared first
- then build the call to the specific method
- """
- if check_qtip_case_exist(case):
- res = format_common_for_dashboard(case, results)
- else:
- res = []
- print "Test cases not declared"
- return res
-
-def format_common_for_dashboard(case, results):
- """
- Common post processing
- """
- test_data_description = case + " results for Dashboard"
- test_data = [{'description': test_data_description}]
-
- graph_name = ''
- if "network_test_suite" in case:
- graph_name = "Throughput index"
- else:
- graph_name = "Index"
-
- # Graph 1:
- # ********************************
- new_element = []
- for date, index in results:
- new_element.append({'x': date,
- 'y1': index,
- })
-
- test_data.append({'name': graph_name,
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'Index Number'},
- 'data_set': new_element})
-
- return test_data
-
-
-############################ For local test ################################
-import os
-import requests
-import json
-from collections import defaultdict
-
-def _get_results(db_url, testcase):
-
- testproject = testcase["project"]
- testcase = testcase["testcase"]
- resultarray = defaultdict()
- #header
- header = {'Content-Type': 'application/json'}
- #url
- url = db_url + "/results?project="+testproject+"&case="+testcase
- data = requests.get(url,header)
- datajson = data.json()
- for x in range(0, len(datajson['test_results'])):
-
- rawresults = datajson['test_results'][x]['details']
- index = rawresults['index']
- resultarray[str(datajson['test_results'][x]['start_date'])]=index
-
- return resultarray
-
-def _test():
-
- db_url = "http://testresults.opnfv.org/testapi"
- raw_result = defaultdict()
-
- raw_result = _get_results(db_url, {"project": "qtip", "testcase": "compute_test_suite"})
- resultitems= raw_result.items()
- result = format_qtip_for_dashboard("compute_test_suite", resultitems)
- print result
-
- raw_result = _get_results(db_url, {"project": "qtip", "testcase": "storage_test_suite"})
- resultitems= raw_result.items()
- result = format_qtip_for_dashboard("storage_test_suite", resultitems)
- print result
-
- raw_result = _get_results(db_url, {"project": "qtip", "testcase": "network_test_suite"})
- resultitems= raw_result.items()
- result = format_qtip_for_dashboard("network_test_suite", resultitems)
- print result
-
-if __name__ == '__main__':
- _test()
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py
deleted file mode 100755
index 5a6882da4..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2015 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"),
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-def get_vsperf_cases():
- """
- get the list of the supported test cases
- TODO: update the list when adding a new test case for the dashboard
- """
- return ["tput_ovsdpdk", "tput_ovs",
- "b2b_ovsdpdk", "b2b_ovs",
- "tput_mod_vlan_ovsdpdk", "tput_mod_vlan_ovs",
- "cont_ovsdpdk", "cont_ovs",
- "pvp_cont_ovsdpdkuser", "pvp_cont_ovsdpdkcuse", "pvp_cont_ovsvirtio",
- "pvvp_cont_ovsdpdkuser", "pvvp_cont_ovsdpdkcuse", "pvvp_cont_ovsvirtio",
- "scalability_ovsdpdk", "scalability_ovs",
- "pvp_tput_ovsdpdkuser", "pvp_tput_ovsdpdkcuse", "pvp_tput_ovsvirtio",
- "pvp_b2b_ovsdpdkuser", "pvp_b2b_ovsdpdkcuse", "pvp_b2b_ovsvirtio",
- "pvvp_tput_ovsdpdkuser", "pvvp_tput_ovsdpdkcuse", "pvvp_tput_ovsvirtio",
- "pvvp_b2b_ovsdpdkuser", "pvvp_b2b_ovsdpdkcuse", "pvvp_b2b_ovsvirtio",
- "cpu_load_ovsdpdk", "cpu_load_ovs",
- "mem_load_ovsdpdk", "mem_load_ovs"]
-
-
-def check_vsperf_case_exist(case):
- """
- check if the testcase exists
- if the test case is not defined or not declared in the list
- return False
- """
- vsperf_cases = get_vsperf_cases()
-
- if (case is None or case not in vsperf_cases):
- return False
- else:
- return True
-
-
-def format_vsperf_for_dashboard(case, results):
- """
- generic method calling the method corresponding to the test case
- check that the testcase is properly declared first
- then build the call to the specific method
- """
- if check_vsperf_case_exist(case):
- res = format_common_for_dashboard(case, results)
- else:
- res = []
- print "Test cases not declared"
- return res
-
-
-def format_common_for_dashboard(case, results):
- """
- Common post processing
- """
- test_data_description = case + " results for Dashboard"
- test_data = [{'description': test_data_description}]
-
- graph_name = ''
- if "b2b" in case:
- graph_name = "B2B frames"
- else:
- graph_name = "Rx frames per second"
-
- # Graph 1: Rx fps = f(time)
- # ********************************
- new_element = []
- for data in results:
- new_element.append({'x': data['start_date'],
- 'y1': data['details']['64'],
- 'y2': data['details']['128'],
- 'y3': data['details']['512'],
- 'y4': data['details']['1024'],
- 'y5': data['details']['1518']})
-
- test_data.append({'name': graph_name,
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'y1label': 'frame size 64B',
- 'y2label': 'frame size 128B',
- 'y3label': 'frame size 512B',
- 'y4label': 'frame size 1024B',
- 'y5label': 'frame size 1518B'},
- 'data_set': new_element})
-
- return test_data
-
-
-
-
-############################ For local test ################################
-import os
-
-def _test():
- ans = [{'start_date': '2015-09-12', 'project_name': 'vsperf', 'version': 'ovs_master', 'pod_name': 'pod1-vsperf', 'case_name': 'tput_ovsdpdk', 'installer': 'build_sie', 'details': {'64': '26.804', '1024': '1097.284', '512': '178.137', '1518': '12635.860', '128': '100.564'}},
- {'start_date': '2015-09-33', 'project_name': 'vsperf', 'version': 'ovs_master', 'pod_name': 'pod1-vsperf', 'case_name': 'tput_ovsdpdk', 'installer': 'build_sie', 'details': {'64': '16.804', '1024': '1087.284', '512': '168.137', '1518': '12625.860', '128': '99.564'}}]
-
- result = format_vsperf_for_dashboard("pvp_cont_ovsdpdkcuse", ans)
- print result
-
- result = format_vsperf_for_dashboard("b2b_ovsdpdk", ans)
- print result
-
- result = format_vsperf_for_dashboard("non_existing", ans)
- print result
-
-if __name__ == '__main__':
- _test()
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py
deleted file mode 100644
index 4df4b5007..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/python
-#
-##############################################################################
-# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# This script is used to build dashboard-ready JSON results
-# It may be used for all the test cases of the Yardstick project
-# a new method format_<Test_case>_for_dashboard(results) must be added for each new test case
-# v0.1: basic example with methods for Ping, Iperf, Netperf, Pktgen,
-# Fio, Lmbench, Perf, Cyclictest.
-#
-
-def get_yardstick_cases():
- """
- get the list of the supported test cases
- TODO: update the list when adding a new test case for the dashboard
- """
- return ["Ping", "Iperf", "Netperf", "Pktgen", "Fio", "Lmbench",
- "Perf", "Cyclictest"]
-
-
-def format_yardstick_for_dashboard(case, results):
- """
- generic method calling the method corresponding to the test case
- check that the testcase is properly declared first
- then build the call to the specific method
- """
- if check_yardstick_case_exist(case):
- cmd = "format_" + case + "_for_dashboard"
- res = globals()[cmd](results)
- else:
- res = []
- print "Test cases not declared"
- return res
-
-
-def check_yardstick_case_exist(case):
- """
- check if the testcase exists
- if the test case is not defined or not declared in the list
- return False
- """
- yardstick_cases = get_yardstick_cases()
-
- if (case is None or case not in yardstick_cases):
- return False
- else:
- return True
-
-
-def _get_test_status_bar(results):
- nbTest = 0
- nbTestOk = 0
-
- for data in results:
- nbTest += 1
- records = [record for record in data['details']
- if "benchmark" in record
- and record["benchmark"]["errors"] != ""]
- if len(records) == 0:
- nbTestOk += 1
- return nbTest, nbTestOk
-
-
-def format_Ping_for_dashboard(results):
- """
- Post processing for the Ping test case
- """
- test_data = [{'description': 'Ping results for Dashboard'}]
-
- # Graph 1: Test_Duration = f(time)
- # ********************************
- new_element = []
- for data in results:
- records = [record["benchmark"]["data"]["rtt"]
- for record in data['details']
- if "benchmark" in record]
-
- avg_rtt = sum(records) / len(records)
- new_element.append({'x': data['start_date'],
- 'y': avg_rtt})
-
- test_data.append({'name': "ping duration",
- 'info': {'type': "graph",
- 'xlabel': 'time',
- 'ylabel': 'duration (s)'},
- 'data_set': new_element})
-
- # Graph 2: bar
- # ************
- nbTest, nbTestOk = _get_test_status_bar(results)
-
- test_data.append({'name': "ping status",
- 'info': {"type": "bar"},
- 'data_set': [{'Nb tests': nbTest,
- 'Nb Success': nbTestOk}]})
-
- return test_data
-
-
-def format_iperf_for_dashboard(results):
- """
- Post processing for the Iperf test case
- """
- test_data = [{'description': 'Iperf results for Dashboard'}]
- return test_data
-
-
-def format_netperf_for_dashboard(results):
- """
- Post processing for the Netperf test case
- """
- test_data = [{'description': 'Netperf results for Dashboard'}]
- return test_data
-
-
-def format_pktgen_for_dashboard(results):
- """
- Post processing for the Pktgen test case
- """
- test_data = [{'description': 'Pktgen results for Dashboard'}]
- return test_data
-
-
-def format_fio_for_dashboard(results):
- """
- Post processing for the Fio test case
- """
- test_data = [{'description': 'Fio results for Dashboard'}]
- return test_data
-
-
-def format_lmbench_for_dashboard(results):
- """
- Post processing for the Lmbench test case
- """
- test_data = [{'description': 'Lmbench results for Dashboard'}]
- return test_data
-
-
-def format_perf_for_dashboard(results):
- """
- Post processing for the Perf test case
- """
- test_data = [{'description': 'Perf results for Dashboard'}]
- return test_data
-
-
-def format_cyclictest_for_dashboard(results):
- """
- Post processing for the Cyclictest test case
- """
- test_data = [{'description': 'Cyclictest results for Dashboard'}]
- return test_data
-
-
-############################ For local test ################################
-import json
-import os
-import requests
-
-def _read_sample_output(filename):
- curr_path = os.path.dirname(os.path.abspath(__file__))
- output = os.path.join(curr_path, filename)
- with open(output) as f:
- sample_output = f.read()
-
- result = json.loads(sample_output)
- return result
-
-# Copied from functest/testcases/Dashboard/dashboard_utils.py
-# with some minor modifications for local testing.
-def _get_results(db_url, test_criteria):
-
- test_project = test_criteria["project"]
- testcase = test_criteria["testcase"]
-
- # Build headers
- headers = {'Content-Type': 'application/json'}
-
- # build the request
- # if criteria is all => remove criteria
- url = db_url + "/results?project=" + test_project + "&case=" + testcase
-
- # Send Request to Test DB
- myData = requests.get(url, headers=headers)
-
- # Get result as a json object
- myNewData = json.loads(myData.text)
-
- # Get results
- myDataResults = myNewData['test_results']
-
- return myDataResults
-
-def _test():
- db_url = "http://213.77.62.197"
- result = _get_results(db_url,
- {"project": "yardstick", "testcase": "Ping"})
- print format_ping_for_dashboard(result)
-
-if __name__ == '__main__':
- _test()
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/dashboard_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/dashboard_handlers.py
deleted file mode 100644
index 303e8d164..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/resources/dashboard_handlers.py
+++ /dev/null
@@ -1,120 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Orange
-# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-from tornado.web import HTTPError
-
-from opnfv_testapi.common.constants import HTTP_NOT_FOUND
-from opnfv_testapi.dashboard.dashboard_utils import \
- check_dashboard_ready_project, check_dashboard_ready_case, \
- get_dashboard_result, get_dashboard_projects
-from opnfv_testapi.resources.result_handlers import GenericResultHandler
-from opnfv_testapi.resources.result_models import TestResult
-from opnfv_testapi.tornado_swagger import swagger
-
-
-class GenericDashboardHandler(GenericResultHandler):
- def __init__(self, application, request, **kwargs):
- super(GenericDashboardHandler, self).__init__(application,
- request,
- **kwargs)
- self.table = self.db_results
- self.table_cls = TestResult
-
-
-class DashboardHandler(GenericDashboardHandler):
- @swagger.operation(nickname='query')
- def get(self):
- """
- @description: Retrieve dashboard ready result(s)
- for a test project
- @notes: Retrieve dashboard ready result(s) for a test project
- Available filters for this request are :
- - project : project name
- - case : case name
- - pod : pod name
- - version : platform version (Arno-R1, ...)
- - installer (fuel, ...)
- - period : x (x last days)
-
- GET /dashboard?project=functest&case=vPing&version=Colorado \
- &pod=pod_name&period=15
- @rtype: L{string}
- @param pod: pod name
- @type pod: L{string}
- @in pod: query
- @required pod: False
- @param project: project name
- @type project: L{string}
- @in project: query
- @required project: True
- @param case: case name
- @type case: L{string}
- @in case: query
- @required case: True
- @param version: i.e. Colorado
- @type version: L{string}
- @in version: query
- @required version: False
- @param installer: fuel/apex/joid/compass
- @type installer: L{string}
- @in installer: query
- @required installer: False
- @param period: last days
- @type period: L{string}
- @in period: query
- @required period: False
- @return 200: test result exist
- @raise 400: period is not in
- @raise 404: project or case name missing,
- or project or case is not dashboard ready
- """
-
- project_arg = self.get_query_argument("project", None)
- case_arg = self.get_query_argument("case", None)
-
- # on /dashboard retrieve the list of projects and testcases
- # ready for dashboard
- if project_arg is None:
- raise HTTPError(HTTP_NOT_FOUND, "Project name missing")
-
- if not check_dashboard_ready_project(project_arg):
- raise HTTPError(HTTP_NOT_FOUND,
- 'Project [{}] not dashboard ready'
- .format(project_arg))
-
- if case_arg is None:
- raise HTTPError(
- HTTP_NOT_FOUND,
- 'Test case missing for project [{}]'.format(project_arg))
-
- if not check_dashboard_ready_case(project_arg, case_arg):
- raise HTTPError(
- HTTP_NOT_FOUND,
- 'Test case [{}] not dashboard ready for project [{}]'
- .format(case_arg, project_arg))
-
- # special case of status for project
- if case_arg == 'status':
- self.finish_request(get_dashboard_result(project_arg, case_arg))
- else:
- def get_result(res, project, case):
- return get_dashboard_result(project, case, res)
-
- self._list(self.set_query(), get_result, project_arg, case_arg)
-
-
-class DashboardProjectsHandler(GenericDashboardHandler):
- @swagger.operation(nickname='list')
- def get(self):
- """
- @description: Retrieve dashboard ready project(s)
- @rtype: L{list}
- @return 200: return all dashboard ready project(s)
- """
- self.finish_request(get_dashboard_projects())
diff --git a/utils/test/result_collection_api/opnfv_testapi/router/url_mappings.py b/utils/test/result_collection_api/opnfv_testapi/router/url_mappings.py
index 695c27de1..eb648ecbb 100644
--- a/utils/test/result_collection_api/opnfv_testapi/router/url_mappings.py
+++ b/utils/test/result_collection_api/opnfv_testapi/router/url_mappings.py
@@ -14,8 +14,6 @@ from opnfv_testapi.resources.project_handlers import ProjectCLHandler, \
ProjectGURHandler
from opnfv_testapi.resources.result_handlers import ResultsCLHandler, \
ResultsGURHandler
-from opnfv_testapi.resources.dashboard_handlers import DashboardHandler, \
- DashboardProjectsHandler
mappings = [
@@ -47,12 +45,4 @@ mappings = [
# (project, case, and pod)
(r"/api/v1/results", ResultsCLHandler),
(r"/api/v1/results/([^/]+)", ResultsGURHandler),
-
- # Method to manage Dashboard ready results
- # GET /dashboard?project=functest&case=vPing&pod=opnfv-jump2
- # => get results in dashboard-ready format
- # get /dashboard
- # => get the list of project with dashboard ready results
- (r"/dashboard/v1/results", DashboardHandler),
- (r"/dashboard/v1/projects", DashboardProjectsHandler),
]
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py
deleted file mode 100644
index 27ec76385..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py
+++ /dev/null
@@ -1,78 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corporation
-# feng.xiaowei@zte.com.cn
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-
-from opnfv_testapi.common.constants import HTTP_NOT_FOUND, HTTP_OK
-
-from test_result import TestResultBase
-
-
-class TestDashboardBase(TestResultBase):
- def setUp(self):
- super(TestDashboardBase, self).setUp()
- self.basePath = '/dashboard/v1/results'
- self.create_help('/api/v1/results', self.req_d)
- self.create_help('/api/v1/results', self.req_d)
- self.list_res = None
-
-
-class TestDashboardQuery(TestDashboardBase):
- def test_projectMissing(self):
- code, body = self.query(self._set_query(project='missing'))
- self.assertEqual(code, HTTP_NOT_FOUND)
- self.assertIn('Project name missing', body)
-
- def test_projectNotReady(self):
- code, body = self.query(self._set_query(project='notReadyProject'))
- self.assertEqual(code, HTTP_NOT_FOUND)
- self.assertIn('Project [notReadyProject] not dashboard ready', body)
-
- def test_testcaseMissing(self):
- code, body = self.query(self._set_query(case='missing'))
- self.assertEqual(code, HTTP_NOT_FOUND)
- self.assertIn('Test case missing for project [{}]'
- .format(self.project),
- body)
-
- def test_testcaseNotReady(self):
- code, body = self.query(self._set_query(case='notReadyCase'))
- self.assertEqual(code, HTTP_NOT_FOUND)
- self.assertIn(
- 'Test case [notReadyCase] not dashboard ready for project [%s]'
- % self.project,
- body)
-
- def test_success(self):
- code, body = self.query(self._set_query())
- self.assertEqual(code, HTTP_OK)
- self.assertIn('{"description": "vPing results for Dashboard"}', body)
-
- def test_caseIsStatus(self):
- code, body = self.query(self._set_query(case='status'))
- self.assertEqual(code, HTTP_OK)
- self.assertIn('{"description": "Functest status"}', body)
-
- def _set_query(self, project=None, case=None):
- uri = ''
- for k, v in list(locals().iteritems()):
- if k == 'self' or k == 'uri':
- continue
- if v is None:
- v = self.__getattribute__(k)
- if v != 'missing':
- uri += '{}={}&'.format(k, v)
- uri += 'pod={}&'.format(self.pod)
- uri += 'version={}&'.format(self.version)
- uri += 'installer={}&'.format(self.installer)
- uri += 'period={}&'.format(5)
- return uri[0:-1]
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard_project.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard_project.py
deleted file mode 100644
index f9d2015be..000000000
--- a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard_project.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import json
-
-from opnfv_testapi.common.constants import HTTP_OK
-from test_base import TestBase
-
-
-class TestDashboardProjectBase(TestBase):
- def setUp(self):
- super(TestDashboardProjectBase, self).setUp()
- self.basePath = '/dashboard/v1/projects'
- self.list_res = None
- self.projects = ['bottlenecks', 'doctor', 'functest',
- 'promise', 'qtip', 'vsperf', 'yardstick']
-
-
-class TestDashboardProjectGet(TestDashboardProjectBase):
- def test_list(self):
- code, body = self.get()
- self.assertEqual(code, HTTP_OK)
- self.assertItemsEqual(self.projects, json.loads(body))