Diffstat
-rw-r--r--  jjb/barometer/barometer-build.sh | 21
-rw-r--r--  jjb/barometer/barometer-upload-artifact.sh | 46
-rw-r--r--  jjb/barometer/barometer.yml | 20
-rwxr-xr-x  jjb/ci_gate_security/anteater-clone-all-repos.sh | 33
-rw-r--r--  jjb/ci_gate_security/anteater-report-to-gerrit.sh | 2
-rw-r--r--  jjb/ci_gate_security/anteater-security-audit-weekly.sh | 37
-rw-r--r--  jjb/ci_gate_security/anteater-security-audit.sh | 16
-rw-r--r--  jjb/ci_gate_security/opnfv-ci-gate-security.yml | 32
-rw-r--r--  jjb/compass4nfv/compass-ci-jobs.yml | 69
-rw-r--r--  jjb/compass4nfv/compass-dovetail-jobs.yml | 2
-rw-r--r--  jjb/compass4nfv/compass-verify-jobs.yml | 6
-rw-r--r--  jjb/doctor/doctor.yml | 33
-rw-r--r--  jjb/dovetail/dovetail-ci-jobs.yml | 2
-rwxr-xr-x  jjb/dovetail/dovetail-cleanup.sh | 7
-rwxr-xr-x  jjb/dovetail/dovetail-run.sh | 22
-rw-r--r--  jjb/fuel/fuel-daily-jobs.yml | 4
-rwxr-xr-x  jjb/fuel/fuel-deploy.sh | 12
-rwxr-xr-x  jjb/fuel/fuel-download-artifact.sh | 3
-rw-r--r--  jjb/global/installer-params.yml | 8
-rw-r--r--  jjb/global/releng-macros.yml | 9
-rw-r--r--  jjb/netready/netready.yml | 2
-rw-r--r--  jjb/releng/opnfv-docker.sh | 18
-rw-r--r--  jjb/releng/opnfv-docker.yml | 4
-rw-r--r--  jjb/releng/testapi-docker-deploy.sh | 2
-rw-r--r--  jjb/storperf/storperf.yml | 2
-rw-r--r--  jjb/yardstick/yardstick-daily-jobs.yml | 2
-rwxr-xr-x  jjb/yardstick/yardstick-daily.sh | 3
-rw-r--r--  prototypes/xci/README.rst | 17
-rwxr-xr-x  prototypes/xci/config/pinned-versions | 2
-rwxr-xr-x  prototypes/xci/config/user-vars | 3
-rw-r--r--  prototypes/xci/file/ansible-role-requirements.yml | 4
-rw-r--r--  prototypes/xci/playbooks/provision-vm-nodes.yml | 9
-rw-r--r--  prototypes/xci/var/opnfv.yml | 1
-rwxr-xr-x  utils/fetch_os_creds.sh | 70
-rwxr-xr-x  utils/test/reporting/functest/reporting-status.py | 1
-rwxr-xr-x  utils/test/reporting/functest/reporting-tempest.py | 19
-rw-r--r--  utils/test/reporting/utils/reporting_utils.py | 27
-rw-r--r--  utils/test/testapi/3rd_party/static/testapi-ui/app.js | 16
-rw-r--r--  utils/test/testapi/3rd_party/static/testapi-ui/components/results/resultsController.js | 4
-rw-r--r--  utils/test/testapi/3rd_party/static/testapi-ui/config.json | 2
-rw-r--r--  utils/test/testapi/etc/config.ini | 10
-rw-r--r--  utils/test/testapi/htmlize/htmlize.py | 4
-rw-r--r--  utils/test/testapi/opnfv_testapi/common/config.py | 5
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/handlers.py | 41
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/result_handlers.py | 21
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py | 59
46 files changed, 592 insertions(+), 140 deletions(-)
diff --git a/jjb/barometer/barometer-build.sh b/jjb/barometer/barometer-build.sh
new file mode 100644
index 000000000..e40841bc2
--- /dev/null
+++ b/jjb/barometer/barometer-build.sh
@@ -0,0 +1,21 @@
+set -x
+
+OPNFV_ARTIFACT_VERSION=$(date -u +"%Y-%m-%d_%H-%M-%S")
+OPNFV_ARTIFACT_URL="$GS_URL/$OPNFV_ARTIFACT_VERSION/"
+
+# log info to console
+echo "Starting the build of Barometer RPMs"
+echo "------------------------------------"
+echo
+
+cd ci
+./install_dependencies.sh
+./build_rpm.sh
+cd $WORKSPACE
+
+# save information regarding artifact into file
+(
+ echo "OPNFV_ARTIFACT_VERSION=$OPNFV_ARTIFACT_VERSION"
+ echo "OPNFV_ARTIFACT_URL=$OPNFV_ARTIFACT_URL"
+) > $WORKSPACE/opnfv.properties
+
diff --git a/jjb/barometer/barometer-upload-artifact.sh b/jjb/barometer/barometer-upload-artifact.sh
new file mode 100644
index 000000000..887c4924e
--- /dev/null
+++ b/jjb/barometer/barometer-upload-artifact.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+set -o nounset
+set -o pipefail
+
+RPM_WORKDIR=$WORKSPACE/rpmbuild
+RPM_DIR=$RPM_WORKDIR/RPMS/x86_64/
+cd $WORKSPACE/
+
+# source the opnfv.properties to get ARTIFACT_VERSION
+source $WORKSPACE/opnfv.properties
+
+# upload property files
+gsutil cp $WORKSPACE/opnfv.properties gs://$OPNFV_ARTIFACT_URL/opnfv.properties > gsutil.properties.log 2>&1
+gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log 2>&1
+
+echo "Uploading the barometer RPMs to artifacts.opnfv.org"
+echo "---------------------------------------------------"
+echo
+
+gsutil -m cp -r $RPM_DIR/* $OPNFV_ARTIFACT_URL > $WORKSPACE/gsutil.log 2>&1
+
+# Check if the RPMs were pushed
+gsutil ls $OPNFV_ARTIFACT_URL > /dev/null 2>&1
+if [[ $? -ne 0 ]]; then
+ echo "Problem while uploading barometer RPMs to $OPNFV_ARTIFACT_URL!"
+ echo "Check log $WORKSPACE/gsutil.log on the appropriate build server"
+ exit 1
+fi
+
+gsutil -m setmeta \
+ -h "Cache-Control:private, max-age=0, no-transform" \
+ gs://$OPNFV_ARTIFACT_URL/*.rpm > /dev/null 2>&1
+
+gsutil -m setmeta \
+ -h "Content-Type:text/html" \
+ -h "Cache-Control:private, max-age=0, no-transform" \
+ gs://$GS_URL/latest.properties \
+ gs://$OPNFV_ARTIFACT_URL/opnfv.properties > /dev/null 2>&1
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
+echo "Artifact is available at $OPNFV_ARTIFACT_URL"
+
+# Clean up the RPM repo from the build machine.
+rm -rf $RPM_WORKDIR
diff --git a/jjb/barometer/barometer.yml b/jjb/barometer/barometer.yml
index 68b8a04c0..c8fb9e25b 100644
--- a/jjb/barometer/barometer.yml
+++ b/jjb/barometer/barometer.yml
@@ -144,8 +144,18 @@
- timed: '@midnight'
builders:
- - shell: |
- pwd
- cd ci
- ./install_dependencies.sh
- ./build_rpm.sh
+ - shell:
+ !include-raw-escape: ./barometer-build.sh
+ - shell:
+ !include-raw-escape: ./barometer-upload-artifact.sh
+
+########################
+# parameter macros
+########################
+- parameter:
+ name: barometer-project-parameter
+ parameters:
+ - string:
+ name: GS_URL
+ default: '$GS_BASE{gs-pathname}'
+ description: "URL to Google Storage."
diff --git a/jjb/ci_gate_security/anteater-clone-all-repos.sh b/jjb/ci_gate_security/anteater-clone-all-repos.sh
new file mode 100755
index 000000000..8a9e73d85
--- /dev/null
+++ b/jjb/ci_gate_security/anteater-clone-all-repos.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+set -o errexit
+set -o pipefail
+set -o nounset
+export PATH=$PATH:/usr/local/bin/
+
+
+#WORKSPACE="$(pwd)"
+
+cd $WORKSPACE
+if [ ! -d "$WORKSPACE/allrepos" ]; then
+ mkdir $WORKSPACE/allrepos
+fi
+
+cd $WORKSPACE/allrepos
+
+declare -a PROJECT_LIST
+EXCLUDE_PROJECTS="All-Projects|All-Users|securedlab"
+
+PROJECT_LIST=($(ssh gerrit.opnfv.org -p 29418 gerrit ls-projects | egrep -v $EXCLUDE_PROJECTS))
+echo "PROJECT_LIST=(${PROJECT_LIST[*]})" > $WORKSPACE/opnfv-projects.sh
+
+for PROJECT in ${PROJECT_LIST[@]}; do
+ echo "> Cloning $PROJECT"
+ if [ ! -d "$PROJECT" ]; then
+ git clone "https://gerrit.opnfv.org/gerrit/$PROJECT.git"
+ else
+ pushd "$PROJECT" > /dev/null
+ git pull -f
+ popd > /dev/null
+ fi
+done
diff --git a/jjb/ci_gate_security/anteater-report-to-gerrit.sh b/jjb/ci_gate_security/anteater-report-to-gerrit.sh
index 71c5a0679..fc3018fb4 100644
--- a/jjb/ci_gate_security/anteater-report-to-gerrit.sh
+++ b/jjb/ci_gate_security/anteater-report-to-gerrit.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-set -o errexit
+# SPDX-license-identifier: Apache-2.0
set -o pipefail
export PATH=$PATH:/usr/local/bin/
EXITSTATUS=0
diff --git a/jjb/ci_gate_security/anteater-security-audit-weekly.sh b/jjb/ci_gate_security/anteater-security-audit-weekly.sh
new file mode 100644
index 000000000..436a173bc
--- /dev/null
+++ b/jjb/ci_gate_security/anteater-security-audit-weekly.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+
+echo "--------------------------------------------------------"
+vols="-v $WORKSPACE/allrepos/:/home/opnfv/anteater/allrepos/"
+echo "Pulling releng-anteater docker image"
+echo "--------------------------------------------------------"
+docker pull opnfv/releng-anteater
+echo "--------------------------------------------------------"
+cmd="docker run -id $vols opnfv/releng-anteater /bin/bash"
+echo "Running docker command $cmd"
+container_id=$($cmd)
+echo "Container ID is $container_id"
+source $WORKSPACE/opnfv-projects.sh
+for project in "${PROJECT_LIST[@]}"
+
+do
+ cmd="anteater --project testproj --path /home/opnfv/anteater/allrepos/$project"
+ echo "Executing command inside container"
+ echo "$cmd"
+ echo "--------------------------------------------------------"
+ docker exec $container_id $cmd > $WORKSPACE/"$project".securityaudit.log 2>&1
+done
+
+exit_code=$?
+echo "--------------------------------------------------------"
+echo "Stopping docker container with ID $container_id"
+docker stop $container_id
+
+
+#gsutil cp $WORKSPACE/securityaudit.log \
+# gs://$GS_URL/$PROJECT-securityaudit-weekly.log 2>&1
+#
+#gsutil -m setmeta \
+# -h "Content-Type:text/html" \
+# -h "Cache-Control:private, max-age=0, no-transform" \
+# gs://$GS_URL/$PROJECT-securityaudit-weekly.log > /dev/null 2>&1
diff --git a/jjb/ci_gate_security/anteater-security-audit.sh b/jjb/ci_gate_security/anteater-security-audit.sh
index d5c0e407c..9bd3cc34f 100644
--- a/jjb/ci_gate_security/anteater-security-audit.sh
+++ b/jjb/ci_gate_security/anteater-security-audit.sh
@@ -15,18 +15,14 @@ echo "--------------------------------------------------------"
docker pull opnfv/releng-anteater
echo "--------------------------------------------------------"
-cmd="sudo docker run --privileged=true -id $envs $vols opnfv/releng-anteater /bin/bash"
-echo "Running docker command $cmd"
-container_id=$($cmd)
-echo "Container ID is $container_id"
-cmd="anteater --project $PROJECT --patchset /home/opnfv/anteater/$PROJECT/patchset"
-echo "Executing command inside container"
+cmd="docker run -i $envs $vols --rm opnfv/releng-anteater \
+/home/opnfv/venv/bin/anteater --project $PROJECT --patchset /home/opnfv/anteater/$PROJECT/patchset"
+echo "Running docker container"
echo "$cmd"
-echo "--------------------------------------------------------"
-docker exec $container_id $cmd > $WORKSPACE/securityaudit.log 2>&1
+$cmd > $WORKSPACE/securityaudit.log 2>&1
exit_code=$?
echo "--------------------------------------------------------"
-echo "Stopping docker container with ID $container_id"
-docker stop $container_id
+echo "Docker container exited with code: $exit_code"
+echo "--------------------------------------------------------"
cat securityaudit.log
exit 0
diff --git a/jjb/ci_gate_security/opnfv-ci-gate-security.yml b/jjb/ci_gate_security/opnfv-ci-gate-security.yml
index e2ad03eae..489dbc507 100644
--- a/jjb/ci_gate_security/opnfv-ci-gate-security.yml
+++ b/jjb/ci_gate_security/opnfv-ci-gate-security.yml
@@ -1,3 +1,4 @@
+# SPDX-license-identifier: Apache-2.0
########################
# Job configuration for opnfv-anteater (security audit)
########################
@@ -9,6 +10,7 @@
jobs:
- 'opnfv-security-audit-verify-{stream}'
+ - 'opnfv-security-audit-weekly-{stream}'
stream:
- master:
@@ -20,6 +22,26 @@
# job templates
########################
- job-template:
+ name: 'opnfv-security-audit-weekly-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ parameters:
+ - label:
+ name: SLAVE_LABEL
+ default: 'ericsson-build3'
+ description: 'Slave label on Jenkins'
+ - project-parameter:
+ project: releng
+ branch: '{branch}'
+
+ triggers:
+ - timed: '@weekly'
+
+ builders:
+ - anteater-security-audit-weekly
+
+- job-template:
name: 'opnfv-security-audit-verify-{stream}'
disabled: '{obj:disabled}'
@@ -55,7 +77,7 @@
comment-contains-value: 'reverify'
projects:
- project-compare-type: 'REG_EXP'
- project-pattern: 'sandbox|releng'
+ project-pattern: 'sandbox|releng|octopus|pharos|functest'
branches:
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
@@ -85,3 +107,11 @@
builders:
- shell:
!include-raw: ./anteater-report-to-gerrit.sh
+
+- builder:
+ name: anteater-security-audit-weekly
+ builders:
+ - shell:
+ !include-raw:
+ - ./anteater-clone-all-repos.sh
+ - ./anteater-security-audit-weekly.sh
diff --git a/jjb/compass4nfv/compass-ci-jobs.yml b/jjb/compass4nfv/compass-ci-jobs.yml
index 3ba69fab7..24724912a 100644
--- a/jjb/compass4nfv/compass-ci-jobs.yml
+++ b/jjb/compass4nfv/compass-ci-jobs.yml
@@ -163,14 +163,28 @@
unstable-threshold: 'FAILURE'
      # dovetail only runs on master for now, not synced with A/B/C branches
      # here the stream means the SUT stream; the dovetail stream is defined in its own job
- # only run on os-(nosdn|odl_l2)-(nofeature|bgpvpn)-ha scenario
+ # only run on os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha scenario
+ # run against SUT master branch, dovetail docker image with latest tag
+      # run against SUT danube branch, dovetail docker image with latest tag (odd days) and cvp.X.X.X tag (even days)
- conditional-step:
- condition-kind: regex-match
- regex: os-(nosdn|odl_l2)-(nofeature|bgpvpn)-ha
- label: '{scenario}'
+ condition-kind: and
+ condition-operands:
+ - condition-kind: regex-match
+ regex: danube
+ label: '{stream}'
+ - condition-kind: regex-match
+ regex: os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha
+ label: '{scenario}'
+ - condition-kind: day-of-week
+ day-selector: select-days
+ days:
+ MON: true
+ WED: true
+ FRI: true
+ SUN: true
steps:
- trigger-builds:
- - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
+ - project: 'dovetail-compass-{pod}-proposed_tests-master'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
@@ -180,6 +194,45 @@
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
+ - conditional-step:
+ condition-kind: and
+ condition-operands:
+ - condition-kind: regex-match
+ regex: danube
+ label: '{stream}'
+ - condition-kind: regex-match
+ regex: os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha
+ label: '{scenario}'
+ - condition-kind: day-of-week
+ day-selector: select-days
+ days:
+ TUE: true
+ THU: true
+ SAT: true
+ steps:
+ - trigger-builds:
+ - project: 'dovetail-compass-{pod}-proposed_tests-danube'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO={scenario}
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - conditional-step:
+ condition-kind: and
+ condition-operands:
+ - condition-kind: regex-match
+ regex: os-(nosdn|odl_l2)-(nofeature|bgpvpn)-ha
+ label: '{scenario}'
+ - condition-kind: regex-match
+ regex: master
+ label: '{stream}'
+ steps:
+ - trigger-builds:
+ - project: 'dovetail-compass-{pod}-proposed_tests-master'
- job-template:
name: 'compass-deploy-{pod}-daily-{stream}'
@@ -205,7 +258,7 @@
- build-name:
name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
- timeout:
- timeout: 120
+ timeout: 240
abort: true
- fix-workspace-permissions
@@ -345,11 +398,11 @@
- trigger:
name: 'compass-os-ocl-nofeature-ha-baremetal-danube-trigger'
triggers:
- - timed: '0 5 * * *'
+ - timed: ''
- trigger:
name: 'compass-os-onos-sfc-ha-baremetal-danube-trigger'
triggers:
- - timed: ''
+ - timed: '0 5 * * *'
- trigger:
name: 'compass-os-odl_l2-moon-ha-baremetal-danube-trigger'
triggers:
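
The conditional-step blocks above alternate the danube-branch dovetail run between the latest-tagged image (MON/WED/FRI/SUN) and the cvp.X.X.X-tagged image (TUE/THU/SAT), while master always uses latest. A minimal Python sketch of that selection rule (the function name is illustrative; 'cvp.0.1.0' matches the docker-tag pinned in the dovetail-ci-jobs.yml hunk further down):

    from datetime import date

    # Weekday sets mirroring the two day-of-week conditions above.
    LATEST_DAYS = {'MON', 'WED', 'FRI', 'SUN'}
    DAY_NAMES = ('MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN')

    def dovetail_tag(sut_branch, today=None):
        """Pick the dovetail docker tag for a SUT branch and date (sketch)."""
        if sut_branch == 'master':
            return 'latest'
        day = DAY_NAMES[(today or date.today()).weekday()]
        return 'latest' if day in LATEST_DAYS else 'cvp.0.1.0'

    # A danube run on a Tuesday uses the pinned CVP image.
    print(dovetail_tag('danube', date(2017, 7, 4)))  # -> cvp.0.1.0
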
diff --git a/jjb/compass4nfv/compass-dovetail-jobs.yml b/jjb/compass4nfv/compass-dovetail-jobs.yml
index 966dae50a..67d1e4eee 100644
--- a/jjb/compass4nfv/compass-dovetail-jobs.yml
+++ b/jjb/compass4nfv/compass-dovetail-jobs.yml
@@ -136,7 +136,7 @@
- build-name:
name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
- timeout:
- timeout: 120
+ timeout: 240
abort: true
- fix-workspace-permissions
diff --git a/jjb/compass4nfv/compass-verify-jobs.yml b/jjb/compass4nfv/compass-verify-jobs.yml
index 4b05e2203..e43f976b5 100644
--- a/jjb/compass4nfv/compass-verify-jobs.yml
+++ b/jjb/compass4nfv/compass-verify-jobs.yml
@@ -74,7 +74,7 @@
wrappers:
- ssh-agent-wrapper
- timeout:
- timeout: 120
+ timeout: 240
fail: true
- fix-workspace-permissions
@@ -197,7 +197,7 @@
wrappers:
- ssh-agent-wrapper
- timeout:
- timeout: 120
+ timeout: 240
fail: true
- fix-workspace-permissions
@@ -297,7 +297,7 @@
wrappers:
- ssh-agent-wrapper
- timeout:
- timeout: 120
+ timeout: 240
fail: true
- fix-workspace-permissions
diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index 807d436da..eb230b59d 100644
--- a/jjb/doctor/doctor.yml
+++ b/jjb/doctor/doctor.yml
@@ -38,8 +38,15 @@
profiler: 'poc'
auto-trigger-name: 'experimental'
+ pod:
+ - arm-pod2:
+ slave-label: '{pod}'
+ - arm-pod3:
+ slave-label: '{pod}'
+
jobs:
- 'doctor-verify-{stream}'
+ - 'doctor-{task}-{installer}-{inspector}-{pod}-{stream}'
- 'doctor-{task}-{installer}-{inspector}-{stream}'
- job-template:
@@ -83,6 +90,32 @@
- shell: "[ -e tests/run.sh ] && bash -n ./tests/run.sh"
- job-template:
+ name: 'doctor-{task}-{installer}-{inspector}-{pod}-{stream}'
+
+ node: '{slave-label}'
+
+ disabled: '{obj:disabled}'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - 'opnfv-build-ubuntu-defaults'
+
+ scm:
+ - git-scm-gerrit
+
+
+ triggers:
+ - '{auto-trigger-name}':
+ project: '{project}'
+ branch: '{branch}'
+
+ builders:
+ - shell: "[ -e tests/run.sh ] && bash -n ./tests/run.sh"
+
+
+- job-template:
name: 'doctor-{task}-{installer}-{inspector}-{stream}'
node: '{slave-label}'
diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml
index 682948d8b..9fdce31f3 100644
--- a/jjb/dovetail/dovetail-ci-jobs.yml
+++ b/jjb/dovetail/dovetail-ci-jobs.yml
@@ -25,7 +25,7 @@
branch: 'stable/{stream}'
dovetail-branch: master
gs-pathname: '/{stream}'
- docker-tag: 'latest'
+ docker-tag: 'cvp.0.1.0'
#-----------------------------------
# POD, PLATFORM, AND BRANCH MAPPING
diff --git a/jjb/dovetail/dovetail-cleanup.sh b/jjb/dovetail/dovetail-cleanup.sh
index 0ee789a97..3ae0cbcc9 100755
--- a/jjb/dovetail/dovetail-cleanup.sh
+++ b/jjb/dovetail/dovetail-cleanup.sh
@@ -1,4 +1,11 @@
#!/bin/bash
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index dce7e5862..d423e9d29 100755
--- a/jjb/dovetail/dovetail-run.sh
+++ b/jjb/dovetail/dovetail-run.sh
@@ -1,4 +1,11 @@
#!/bin/bash
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
#the noun INSTALLER is used in the community; this is just an example run.
#multiple platforms are supported.
@@ -7,14 +14,12 @@ set -e
[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
DOVETAIL_HOME=${WORKSPACE}/cvp
-if [ -d ${DOVETAIL_HOME} ]; then
- sudo rm -rf ${DOVETAIL_HOME}/*
-else
- sudo mkdir -p ${DOVETAIL_HOME}
-fi
+[ -d ${DOVETAIL_HOME} ] && sudo rm -rf ${DOVETAIL_HOME}
+
+mkdir -p ${DOVETAIL_HOME}
DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
-sudo mkdir -p ${DOVETAIL_CONFIG}
+mkdir -p ${DOVETAIL_CONFIG}
sshkey=""
# The path of openrc.sh is defined in fetch_os_creds.sh
@@ -47,7 +52,7 @@ releng_repo=${WORKSPACE}/releng
git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
- sudo /bin/bash ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
+ ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
fi
if [[ -f $OPENRC ]]; then
@@ -102,7 +107,8 @@ if [ "$INSTALLER_TYPE" == "fuel" ]; then
fi
# sdnvpn test case needs to download this image first before running
-sudo wget -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${DOVETAIL_CONFIG}
+echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
+wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${DOVETAIL_CONFIG}
opts="--privileged=true -id"
diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml
index dd0590c72..68677089d 100644
--- a/jjb/fuel/fuel-daily-jobs.yml
+++ b/jjb/fuel/fuel-daily-jobs.yml
@@ -293,6 +293,10 @@
name: GS_URL
default: artifacts.opnfv.org/$PROJECT{gs-pathname}
description: "URL to Google Storage."
+ - string:
+ name: SSH_KEY
+ default: "/tmp/mcp.rsa"
+ description: "Path to private SSH key to access environment nodes. For MCP deployments only."
########################
# trigger macros
########################
diff --git a/jjb/fuel/fuel-deploy.sh b/jjb/fuel/fuel-deploy.sh
index 4d48ee587..2fb5c71e4 100755
--- a/jjb/fuel/fuel-deploy.sh
+++ b/jjb/fuel/fuel-deploy.sh
@@ -12,11 +12,13 @@ set -o pipefail
export TERM="vt220"
-# source the file so we get OPNFV vars
-source latest.properties
+if [[ "$BRANCH" != 'master' ]]; then
+ # source the file so we get OPNFV vars
+ source latest.properties
-# echo the info about artifact that is used during the deployment
-echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+ # echo the info about artifact that is used during the deployment
+ echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+fi
if [[ "$JOB_NAME" =~ "merge" ]]; then
# set simplest scenario for virtual deploys to run for merges
@@ -75,7 +77,7 @@ echo "--------------------------------------------------------"
echo "Scenario: $DEPLOY_SCENARIO"
echo "Lab: $LAB_NAME"
echo "POD: $POD_NAME"
-echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
+[[ "$BRANCH" != 'master' ]] && echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
echo
echo "Starting the deployment using $INSTALLER_TYPE. This could take some time..."
echo "--------------------------------------------------------"
diff --git a/jjb/fuel/fuel-download-artifact.sh b/jjb/fuel/fuel-download-artifact.sh
index 8cc552e8d..c3b8253de 100755
--- a/jjb/fuel/fuel-download-artifact.sh
+++ b/jjb/fuel/fuel-download-artifact.sh
@@ -10,6 +10,9 @@
set -o errexit
set -o pipefail
+# disable Fuel ISO download for master branch
+[[ "$BRANCH" == 'master' ]] && exit 0
+
# use proxy url to replace the normal URL, since googleusercontent.com will be blocked randomly
[[ "$NODE_NAME" =~ (zte) ]] && GS_URL=${GS_BASE_PROXY%%/*}/$GS_URL
diff --git a/jjb/global/installer-params.yml b/jjb/global/installer-params.yml
index 40fc42c76..ee154af03 100644
--- a/jjb/global/installer-params.yml
+++ b/jjb/global/installer-params.yml
@@ -38,6 +38,14 @@
default: '10.20.0.2'
description: 'IP of the installer'
- string:
+ name: SALT_MASTER_IP
+ default: '192.168.10.100'
+ description: 'IP of the salt master (for mcp deployments)'
+ - string:
+ name: SSH_KEY
+ default: '/tmp/mcp.rsa'
+ description: 'Path to private SSH key to access environment nodes'
+ - string:
name: INSTALLER_TYPE
default: fuel
description: 'Installer used for deploying OPNFV on this POD'
diff --git a/jjb/global/releng-macros.yml b/jjb/global/releng-macros.yml
index 5341db464..e4dfa8d80 100644
--- a/jjb/global/releng-macros.yml
+++ b/jjb/global/releng-macros.yml
@@ -259,8 +259,11 @@
find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
sed -e "s|^$local_path| http://$gs_path|" >> gerrit_comment.txt
+# To take advantage of this macro, have your build write
+# out the file 'gerrit_comment.txt' with information to post
+# back to gerrit and include this macro in the list of builders.
- builder:
- name: report-docs-build-result-to-gerrit
+ name: report-build-result-to-gerrit
builders:
- shell: |
#!/bin/bash
@@ -407,14 +410,14 @@
builders:
- build-html-and-pdf-docs-output
- upload-under-review-docs-to-opnfv-artifacts
- - report-docs-build-result-to-gerrit
+ - report-build-result-to-gerrit
- builder:
name: upload-merged-docs
builders:
- build-html-and-pdf-docs-output
- upload-generated-docs-to-opnfv-artifacts
- - report-docs-build-result-to-gerrit
+ - report-build-result-to-gerrit
- remove-old-docs-from-opnfv-artifacts
- builder:
diff --git a/jjb/netready/netready.yml b/jjb/netready/netready.yml
index 9a4d8858c..2702c45b3 100644
--- a/jjb/netready/netready.yml
+++ b/jjb/netready/netready.yml
@@ -58,7 +58,7 @@
- job-template:
name: 'netready-build-gluon-packages-daily-{stream}'
- disabled: false
+ disabled: true
concurrent: true
diff --git a/jjb/releng/opnfv-docker.sh b/jjb/releng/opnfv-docker.sh
index 2aa52adc5..ebd0c9f3d 100644
--- a/jjb/releng/opnfv-docker.sh
+++ b/jjb/releng/opnfv-docker.sh
@@ -73,6 +73,8 @@ fi
# Get tag version
echo "Current branch: $BRANCH"
+BUILD_BRANCH=$BRANCH
+
if [[ "$BRANCH" == "master" ]]; then
DOCKER_TAG="latest"
elif [[ -n "${RELEASE_VERSION-}" ]]; then
@@ -82,19 +84,17 @@ else
DOCKER_TAG="stable"
fi
+if [[ -n "${COMMIT_ID-}" && -n "${RELEASE_VERSION-}" ]]; then
+ DOCKER_TAG=$RELEASE_VERSION
+ BUILD_BRANCH=$COMMIT_ID
+fi
+
# Start the build
echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG"
echo "--------------------------------------------------------"
echo
-if [[ $DOCKER_REPO_NAME == *"dovetail"* ]]; then
- if [[ -n "${RELEASE_VERSION-}" ]]; then
- DOCKER_TAG=${RELEASE_VERSION}
- fi
- cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG -f $DOCKERFILE ."
-else
- cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH
- -f $DOCKERFILE ."
-fi
+cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BUILD_BRANCH
+ -f $DOCKERFILE ."
echo ${cmd}
${cmd}
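
The refactored tag logic above resolves a single build reference for every repo: master maps to latest, everything else falls back to stable (the explicit RELEASE_VERSION elif is partly elided by the hunk), and a COMMIT_ID paired with a RELEASE_VERSION overrides both to produce a snapshot image built from that exact commit. A small Python sketch of the precedence, under those assumptions:

    def resolve_docker_build(branch, release_version=None, commit_id=None):
        """Mirror the DOCKER_TAG/BUILD_BRANCH selection above (sketch only)."""
        build_ref = branch               # BUILD_BRANCH defaults to $BRANCH
        if branch == 'master':
            tag = 'latest'
        else:
            # the RELEASE_VERSION elif branch is elided in the hunk above;
            # everything else falls back to 'stable'
            tag = 'stable'
        if commit_id and release_version:
            tag = release_version        # snapshot image ...
            build_ref = commit_id        # ... built from the exact commit
        return tag, build_ref

    assert resolve_docker_build('master') == ('latest', 'master')
    assert resolve_docker_build('stable/danube', '1.0', 'abc123') == ('1.0', 'abc123')
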
diff --git a/jjb/releng/opnfv-docker.yml b/jjb/releng/opnfv-docker.yml
index 5fe0eb913..095ba4129 100644
--- a/jjb/releng/opnfv-docker.yml
+++ b/jjb/releng/opnfv-docker.yml
@@ -106,6 +106,10 @@
default: "opnfv/{project}"
description: "Dockerhub repo to be pushed to."
- string:
+ name: COMMIT_ID
+ default: ""
+ description: "commit id to make a snapshot docker image"
+ - string:
name: RELEASE_VERSION
default: ""
description: "Release version, e.g. 1.0, 2.0, 3.0"
diff --git a/jjb/releng/testapi-docker-deploy.sh b/jjb/releng/testapi-docker-deploy.sh
index b4e60b09a..02c5e3a27 100644
--- a/jjb/releng/testapi-docker-deploy.sh
+++ b/jjb/releng/testapi-docker-deploy.sh
@@ -4,7 +4,7 @@ function check() {
# Verify hosted
sleep 5
- cmd=`curl -s --head --request GET http://testresults.opnfv.org/test/swagger/spec | grep '200 OK' > /dev/null`
+ cmd=`curl -s --head --request GET http://testresults.opnfv.org/test/swagger/APIs | grep '200 OK' > /dev/null`
rc=$?
echo $rc
diff --git a/jjb/storperf/storperf.yml b/jjb/storperf/storperf.yml
index be53b27b4..13186a1ad 100644
--- a/jjb/storperf/storperf.yml
+++ b/jjb/storperf/storperf.yml
@@ -201,7 +201,7 @@
- git-scm
triggers:
- - timed: '0 18 * * *'
+ - timed: '0 22 * * *'
builders:
- shell: |
diff --git a/jjb/yardstick/yardstick-daily-jobs.yml b/jjb/yardstick/yardstick-daily-jobs.yml
index 5ff36f842..ff1d47eb4 100644
--- a/jjb/yardstick/yardstick-daily-jobs.yml
+++ b/jjb/yardstick/yardstick-daily-jobs.yml
@@ -282,7 +282,7 @@
publishers:
- email:
- recipients: jean.gaoliang@huawei.com limingjiang@huawei.com
+ recipients: jean.gaoliang@huawei.com limingjiang@huawei.com ross.b.brattain@intel.com
########################
# builder macros
diff --git a/jjb/yardstick/yardstick-daily.sh b/jjb/yardstick/yardstick-daily.sh
index 973f83ad5..1c2abad3f 100755
--- a/jjb/yardstick/yardstick-daily.sh
+++ b/jjb/yardstick/yardstick-daily.sh
@@ -31,7 +31,8 @@ fi
opts="--privileged=true --rm"
envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
-e NODE_NAME=${NODE_NAME} -e EXTERNAL_NETWORK=${EXTERNAL_NETWORK} \
- -e YARDSTICK_BRANCH=${BRANCH} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
+ -e YARDSTICK_BRANCH=${BRANCH} -e BRANCH=${BRANCH} \
+ -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
# Pull the image with correct tag
echo "Yardstick: Pulling image opnfv/yardstick:${DOCKER_TAG}"
diff --git a/prototypes/xci/README.rst b/prototypes/xci/README.rst
index 0d9366533..b65abde9b 100644
--- a/prototypes/xci/README.rst
+++ b/prototypes/xci/README.rst
@@ -185,6 +185,23 @@ continuously chasing the HEAD of corresponding branches.
Once a working version is identified, the versions of the upstream components
are then bumped in releng repo.
+==================
+XCI developer tips
+==================
+
+It is possible to run XCI in development mode, in order to test the
+latest changes. When deploying in this mode, the script will use the working
+directories for releng/bifrost/OSA instead of cloning the full repositories
+on each run.
+To enable it, you need to export the different DEV_PATH vars:
+
+- export OPNFV_RELENG_DEV_PATH=/opt/releng/
+- export OPENSTACK_BIFROST_DEV_PATH=/opt/bifrost
+- export OPENSTACK_OSA_DEV_PATH=/opt/openstack-ansible
+
+This will cause the deployment to pick up the development copies stored at the
+specified directories, and use them instead of cloning those on every run.
+
===========================================
Limitations, Known Issues, and Improvements
===========================================
diff --git a/prototypes/xci/config/pinned-versions b/prototypes/xci/config/pinned-versions
index e3b49c7d4..c42693671 100755
--- a/prototypes/xci/config/pinned-versions
+++ b/prototypes/xci/config/pinned-versions
@@ -22,6 +22,6 @@
# use releng from master until the development work with the sandbox is complete
export OPNFV_RELENG_VERSION="master"
# HEAD of "master" as of 04.04.2017
-export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"6109f824e5510e794dbf1968c3859e8b6356d280"}
+export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"7c9bb5e07c6bc3b42c9a9e8457e5eef511075b38"}
# HEAD of "master" as of 04.04.2017
export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"d9e1330c7ff9d72a604b6b4f3af765f66a01b30e"}
diff --git a/prototypes/xci/config/user-vars b/prototypes/xci/config/user-vars
index 5ed539627..fd11a5845 100755
--- a/prototypes/xci/config/user-vars
+++ b/prototypes/xci/config/user-vars
@@ -56,3 +56,6 @@ export LOG_PATH=${LOG_PATH:-${XCI_DEVEL_ROOT}/opnfv/logs}
export RUN_TEMPEST=${RUN_TEMPEST:-false}
# Set this to true to force XCI to re-create the target OS images
export CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
+# Set this to a full path pointing to extra config files (containing
+# group_vars/all)
+export XCI_EXTRA_VARS_PATH=${XCI_EXTRA_VARS_PATH:-""}
diff --git a/prototypes/xci/file/ansible-role-requirements.yml b/prototypes/xci/file/ansible-role-requirements.yml
index 842bcc44c..5a96e2a82 100644
--- a/prototypes/xci/file/ansible-role-requirements.yml
+++ b/prototypes/xci/file/ansible-role-requirements.yml
@@ -9,6 +9,10 @@
##############################################################################
# these versions are extracted based on the osa commit d9e1330c7ff9d72a604b6b4f3af765f66a01b30e on 04.04.2017
# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=d9e1330c7ff9d72a604b6b4f3af765f66a01b30e
+- name: ansible-hardening
+ scm: git
+ src: https://git.openstack.org/openstack/ansible-hardening
+ version: 051fe3195f59d1ee8db06fca5d2cce7a25e58861
- name: apt_package_pinning
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
diff --git a/prototypes/xci/playbooks/provision-vm-nodes.yml b/prototypes/xci/playbooks/provision-vm-nodes.yml
index 8be36c7a9..92b5c5535 100644
--- a/prototypes/xci/playbooks/provision-vm-nodes.yml
+++ b/prototypes/xci/playbooks/provision-vm-nodes.yml
@@ -42,6 +42,15 @@
delete: yes
when:
- OPNFV_RELENG_DEV_PATH != ""
+ - name: Copy extra vars to releng and bifrost
+ synchronize:
+ src: "{{ XCI_EXTRA_VARS_PATH }}"
+ dest: "{{ item }}"
+ with_items:
+ - "{{ OPNFV_RELENG_PATH }}/prototypes/xci/playbooks"
+ - "{{ OPENSTACK_BIFROST_PATH }}/playbooks/inventory"
+ when:
+ - XCI_EXTRA_VARS_PATH != ""
- hosts: localhost
connection: local
diff --git a/prototypes/xci/var/opnfv.yml b/prototypes/xci/var/opnfv.yml
index 85f532ad2..aeafaceb1 100644
--- a/prototypes/xci/var/opnfv.yml
+++ b/prototypes/xci/var/opnfv.yml
@@ -27,3 +27,4 @@ XCI_LOOP: "{{ lookup('env','XCI_LOOP') }}"
LOG_PATH: "{{ lookup('env','LOG_PATH') }}"
OPNFV_HOST_IP: "{{ lookup('env','OPNFV_HOST_IP') }}"
OPNFV_SSH_HOST_KEYS_PATH: "{{ lookup('env', 'OPNFV_SSH_HOST_KEYS_PATH') }}"
+XCI_EXTRA_VARS_PATH: "{{ lookup('env', 'XCI_EXTRA_VARS_PATH') }}"
diff --git a/utils/fetch_os_creds.sh b/utils/fetch_os_creds.sh
index 458bbda3b..993c0b948 100755
--- a/utils/fetch_os_creds.sh
+++ b/utils/fetch_os_creds.sh
@@ -12,8 +12,9 @@ set -o nounset
set -o pipefail
usage() {
- echo "usage: $0 [-v] -d <destination> -i <installer_type> -a <installer_ip>" >&2
+ echo "usage: $0 [-v] -d <destination> -i <installer_type> -a <installer_ip> [-s <ssh_key>]" >&2
echo "[-v] Virtualized deployment" >&2
+ echo "[-s <ssh_key>] Path to ssh key. For MCP deployments only" >&2
}
info () {
@@ -53,11 +54,12 @@ swap_to_public() {
: ${DEPLOY_TYPE:=''}
#Get options
-while getopts ":d:i:a:h:v" optchar; do
+while getopts ":d:i:a:h:s:v" optchar; do
case "${optchar}" in
d) dest_path=${OPTARG} ;;
i) installer_type=${OPTARG} ;;
a) installer_ip=${OPTARG} ;;
+ s) ssh_key=${OPTARG} ;;
v) DEPLOY_TYPE="virt" ;;
*) echo "Non-option argument: '-${OPTARG}'" >&2
usage
@@ -70,6 +72,9 @@ done
dest_path=${dest_path:-$HOME/opnfv-openrc.sh}
installer_type=${installer_type:-$INSTALLER_TYPE}
installer_ip=${installer_ip:-$INSTALLER_IP}
+if [ "${installer_type}" == "fuel" ] && [ "${BRANCH}" == "master" ]; then
+ installer_ip=${SALT_MASTER_IP}
+fi
if [ -z $dest_path ] || [ -z $installer_type ] || [ -z $installer_ip ]; then
usage
@@ -89,40 +94,45 @@ ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
# Start fetching the files
if [ "$installer_type" == "fuel" ]; then
- #ip_fuel="10.20.0.2"
verify_connectivity $installer_ip
+ if [ "${BRANCH}" == "master" ]; then
+ ssh_key=${ssh_key:-$SSH_KEY}
+ if [ -z $ssh_key ] || [ ! -f $ssh_key ]; then
+ error "Please provide path to existing ssh key for mcp deployment."
+ exit 2
+ fi
+ ssh_options+=" -i ${ssh_key}"
- env=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
- 'fuel env'|grep operational|head -1|awk '{print $1}') &> /dev/null
- if [ -z $env ]; then
- error "No operational environment detected in Fuel"
- fi
- env_id="${FUEL_ENV:-$env}"
-
- # Check if controller is alive (online='True')
- controller_ip=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
- "fuel node --env ${env_id} | grep controller | grep 'True\| 1' | awk -F\| '{print \$5}' | head -1" | \
- sed 's/ //g') &> /dev/null
+ # retrieving controller vip
+ controller_ip=$(ssh 2>/dev/null ${ssh_options} ubuntu@${installer_ip} \
+ "sudo salt --out txt 'ctl01*' pillar.get _param:openstack_control_address | awk '{print \$2}'" | \
+ sed 's/ //g') &> /dev/null
- if [ -z $controller_ip ]; then
- error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
- fi
+ info "Fetching rc file from controller $controller_ip..."
+ ssh ${ssh_options} ubuntu@${controller_ip} "sudo cat /root/keystonercv3" > $dest_path
+ else
+ #ip_fuel="10.20.0.2"
+ env=$(sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
+ 'fuel env'|grep operational|head -1|awk '{print $1}') &> /dev/null
+ if [ -z $env ]; then
+ error "No operational environment detected in Fuel"
+ fi
+ env_id="${FUEL_ENV:-$env}"
- info "Fetching rc file from controller $controller_ip..."
- sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
- "scp $ssh_options ${controller_ip}:/root/openrc ." &> /dev/null
- sshpass -p r00tme scp 2>/dev/null $ssh_options root@${installer_ip}:~/openrc $dest_path &> /dev/null
+ # Check if controller is alive (online='True')
+ controller_ip=$(sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
+ "fuel node --env ${env_id} | grep controller | grep 'True\| 1' | awk -F\| '{print \$5}' | head -1" | \
+ sed 's/ //g') &> /dev/null
- #This file contains the mgmt keystone API, we need the public one for our rc file
- admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//')
- public_ip=$(sshpass -p r00tme ssh $ssh_options root@${installer_ip} \
- "ssh ${controller_ip} 'source openrc; openstack endpoint list'" \
- | grep keystone | grep public | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null
- #| grep http | head -1 | cut -d '|' -f 4 | sed 's/v1\/.*/v1\//' | sed 's/ //g') &> /dev/null
- #NOTE: this is super ugly sed 's/v1\/.*/v1\//'OS_AUTH_URL
- # but sometimes the output of endpoint-list is like this: http://172.30.9.70:8004/v1/%(tenant_id)s
- # Fuel virtual need a fix
+ if [ -z $controller_ip ]; then
+ error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
+ fi
+ info "Fetching rc file from controller $controller_ip..."
+ sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
+ "scp ${ssh_options} ${controller_ip}:/root/openrc ." &> /dev/null
+ sshpass -p r00tme scp 2>/dev/null ${ssh_options} root@${installer_ip}:~/openrc $dest_path &> /dev/null
+ fi
#convert to v3 URL
auth_url=$(cat $dest_path|grep AUTH_URL)
if [[ -z `echo $auth_url |grep v3` ]]; then
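
For Fuel the credential fetch now forks on the branch: master means an MCP deployment, reached over SSH with the key passed via -s (or $SSH_KEY), where the salt master is queried for the controller VIP and keystonercv3 is copied from the controller; older branches keep the sshpass/fuel-CLI path. A Python sketch of just that dispatch (the returned strings describe the shell steps above, they do not run them):

    def fuel_creds_path(branch, ssh_key=None):
        """Which branch of fetch_os_creds.sh handles a Fuel POD (sketch)."""
        if branch == 'master':
            if not ssh_key:
                raise ValueError('MCP deployments need -s <ssh_key>')
            return ('mcp: ssh ubuntu@$SALT_MASTER_IP, '
                    'salt pillar.get openstack_control_address, '
                    'cat /root/keystonercv3 from the controller')
        return ('classic: sshpass root@$INSTALLER_IP, fuel env/node, '
                'scp /root/openrc from the controller')
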
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index e700e047f..77ab7840f 100755
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -107,7 +107,6 @@ for version in versions:
scenario_results = rp_utils.getScenarios(healthcheck,
installer,
version)
-
# get nb of supported architecture (x86, aarch64)
architectures = rp_utils.getArchitectures(scenario_results)
logger.info("Supported architectures: {}".format(architectures))
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
index 6e6585a32..0304298b4 100755
--- a/utils/test/reporting/functest/reporting-tempest.py
+++ b/utils/test/reporting/functest/reporting-tempest.py
@@ -1,4 +1,15 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# SPDX-license-identifier: Apache-2.0
+
from urllib2 import Request, urlopen, URLError
+from datetime import datetime
import json
import jinja2
import os
@@ -97,7 +108,13 @@ for version in rp_utils.get_config('general.versions'):
crit_rate = True
# Expect that the suite duration is inferior to 30m
- if result['details']['duration'] < criteria_duration:
+ stop_date = datetime.strptime(result['stop_date'],
+ '%Y-%m-%d %H:%M:%S')
+ start_date = datetime.strptime(result['start_date'],
+ '%Y-%m-%d %H:%M:%S')
+
+ delta = stop_date - start_date
+ if (delta.total_seconds() < criteria_duration):
crit_time = True
result['criteria'] = {'tests': crit_tests,
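
The duration criterion is now derived from the stored start_date/stop_date strings instead of the details.duration field. A standalone sketch of the check (1800 s assumes the 30-minute threshold mentioned in the comment above):

    from datetime import datetime

    FMT = '%Y-%m-%d %H:%M:%S'

    def within_duration(result, criteria_duration=1800):
        """True when stop_date - start_date is under the threshold (sketch)."""
        delta = (datetime.strptime(result['stop_date'], FMT) -
                 datetime.strptime(result['start_date'], FMT))
        return delta.total_seconds() < criteria_duration

    print(within_duration({'start_date': '2017-06-01 10:00:00',
                           'stop_date': '2017-06-01 10:25:00'}))  # True
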
diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py
index 599a93818..0a178ba1f 100644
--- a/utils/test/reporting/utils/reporting_utils.py
+++ b/utils/test/reporting/utils/reporting_utils.py
@@ -117,19 +117,29 @@ def getScenarios(case, installer, version):
url = ("http://" + url_base + "?case=" + case +
"&period=" + str(period) + "&installer=" + installer +
"&version=" + version)
- request = Request(url)
try:
+ request = Request(url)
response = urlopen(request)
k = response.read()
results = json.loads(k)
test_results = results['results']
- except URLError as e:
- print('Got an error code:', e)
+
+ page = results['pagination']['total_pages']
+ if page > 1:
+ test_results = []
+ for i in range(1, page + 1):
+ url_page = url + "&page=" + str(i)
+ request = Request(url_page)
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ test_results += results['results']
+ except URLError as err:
+ print('Got an error code:', err)
if test_results is not None:
test_results.reverse()
-
scenario_results = {}
for r in test_results:
@@ -157,7 +167,6 @@ def getScenarioStats(scenario_results):
return scenario_stats
-# TODO convergence with above function getScenarios
def getScenarioStatus(installer, version):
period = get_config('general.period')
url_base = get_config('testapi.url')
@@ -213,8 +222,8 @@ def getQtipResults(version, installer):
k = response.read()
response.close()
results = json.loads(k)['results']
- except URLError as e:
- print('Got an error code:', e)
+ except URLError as err:
+ print('Got an error code:', err)
result_dict = {}
if results:
@@ -427,9 +436,9 @@ def export_csv(scenario_file_name, installer, version):
"/functest/scenario_history_" +
installer + ".csv")
scenario_installer_file = open(scenario_installer_file_name, "a")
- with open(scenario_file_name, "r") as f:
+ with open(scenario_file_name, "r") as scenario_file:
scenario_installer_file.write("date,scenario,installer,detail,score\n")
- for line in f:
+ for line in scenario_file:
if installer in line:
scenario_installer_file.write(line)
scenario_installer_file.close
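
getScenarios now follows the pagination metadata returned by the testapi instead of reading a single page. A condensed sketch of that loop (urllib2 as in the module; the query URL is whatever getScenarios built):

    import json
    from urllib2 import Request, urlopen

    def fetch_all_results(url):
        """Accumulate 'results' across every page the API reports (sketch)."""
        first = json.loads(urlopen(Request(url)).read())
        pages = first['pagination']['total_pages']
        if pages <= 1:
            return first['results']
        results = []
        for i in range(1, pages + 1):
            page = json.loads(urlopen(Request(url + "&page=" + str(i))).read())
            results += page['results']
        return results
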
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/app.js b/utils/test/testapi/3rd_party/static/testapi-ui/app.js
index 4a2f23af9..8c701c36c 100644
--- a/utils/test/testapi/3rd_party/static/testapi-ui/app.js
+++ b/utils/test/testapi/3rd_party/static/testapi-ui/app.js
@@ -37,30 +37,30 @@
$stateProvider.
state('home', {
url: '/',
- templateUrl: '/testapi-ui/components/home/home.html'
+ templateUrl: 'testapi-ui/components/home/home.html'
}).
state('about', {
url: '/about',
- templateUrl: '/testapi-ui/components/about/about.html'
+ templateUrl: 'testapi-ui/components/about/about.html'
}).
state('guidelines', {
url: '/guidelines',
- templateUrl: '/testapi-ui/components/guidelines/guidelines.html',
+ templateUrl: 'testapi-ui/components/guidelines/guidelines.html',
controller: 'GuidelinesController as ctrl'
}).
state('communityResults', {
url: '/community_results',
- templateUrl: '/testapi-ui/components/results/results.html',
+ templateUrl: 'testapi-ui/components/results/results.html',
controller: 'ResultsController as ctrl'
}).
state('userResults', {
- url: '/user_results',
+ url: 'user_results',
templateUrl: '/testapi-ui/components/results/results.html',
controller: 'ResultsController as ctrl'
}).
state('resultsDetail', {
url: '/results/:testID',
- templateUrl: '/testapi-ui/components/results-report' +
+ templateUrl: 'testapi-ui/components/results-report' +
'/resultsReport.html',
controller: 'ResultsReportController as ctrl'
}).
@@ -71,12 +71,12 @@
}).
state('authFailure', {
url: '/auth_failure',
- templateUrl: '/testapi-ui/components/home/home.html',
+ templateUrl: 'testapi-ui/components/home/home.html',
controller: 'AuthFailureController as ctrl'
}).
state('logout', {
url: '/logout',
- templateUrl: '/testapi-ui/components/logout/logout.html',
+ templateUrl: 'testapi-ui/components/logout/logout.html',
controller: 'LogoutController as ctrl'
}).
state('userVendors', {
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/results/resultsController.js b/utils/test/testapi/3rd_party/static/testapi-ui/components/results/resultsController.js
index 93a549a7f..9e3540da5 100644
--- a/utils/test/testapi/3rd_party/static/testapi-ui/components/results/resultsController.js
+++ b/utils/test/testapi/3rd_party/static/testapi-ui/components/results/resultsController.js
@@ -123,8 +123,8 @@
ctrl.resultsRequest =
$http.get(content_url).success(function (data) {
ctrl.data = data;
- ctrl.totalItems = 20 // ctrl.data.pagination.total_pages * ctrl.itemsPerPage;
- ctrl.currentPage = 1 // ctrl.data.pagination.current_page;
+ ctrl.totalItems = ctrl.data.pagination.total_pages * ctrl.itemsPerPage;
+ ctrl.currentPage = ctrl.data.pagination.current_page;
}).error(function (error) {
ctrl.data = null;
ctrl.totalItems = 0;
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/config.json b/utils/test/testapi/3rd_party/static/testapi-ui/config.json
index 5d48c7b12..9fdd85fbb 100644
--- a/utils/test/testapi/3rd_party/static/testapi-ui/config.json
+++ b/utils/test/testapi/3rd_party/static/testapi-ui/config.json
@@ -1 +1 @@
-{"testapiApiUrl": "http://localhost:8000/api/v1"}
+{"testapiApiUrl": "http://testresults.opnfv.org/test/api/v1"}
diff --git a/utils/test/testapi/etc/config.ini b/utils/test/testapi/etc/config.ini
index 692e48897..dad59d2d0 100644
--- a/utils/test/testapi/etc/config.ini
+++ b/utils/test/testapi/etc/config.ini
@@ -8,8 +8,12 @@ dbname = test_results_collection
[api]
# Listening port
-url = http://localhost:8000/api/v1
+url = http://testresults.opnfv.org/test/api/v1
port = 8000
+
+# Number of results for one page (integer value)
+#results_per_page = 20
+
# With debug_on set to true, error traces will be shown in HTTP responses
debug = True
authenticate = False
@@ -18,7 +22,7 @@ authenticate = False
base_url = http://localhost:8000
[ui]
-url = http://localhost:8000
+url = http://testresults.opnfv.org/test
[osid]
@@ -41,7 +45,7 @@ openid_ns = http://specs.openid.net/auth/2.0
# Return endpoint in Refstack's API. Value indicating the endpoint
# where the user should be returned to after signing in. Openstack Id
# Idp only supports HTTPS address types. (string value)
-openid_return_to = /api/v1/auth/signin_return
+openid_return_to = v1/auth/signin_return
# Claimed identifier. This value must be set to
# "http://specs.openid.net/auth/2.0/identifier_select". or to user
diff --git a/utils/test/testapi/htmlize/htmlize.py b/utils/test/testapi/htmlize/htmlize.py
index b8c4fb43f..4576d9bb0 100644
--- a/utils/test/testapi/htmlize/htmlize.py
+++ b/utils/test/testapi/htmlize/htmlize.py
@@ -40,13 +40,13 @@ if __name__ == '__main__':
type=str,
required=False,
default=('http://testresults.opnfv.org'
- '/test/swagger/spec.json'),
+ '/test/swagger/resources.json'),
help='Resource Listing Spec File')
parser.add_argument('-au', '--api-declaration-url',
type=str,
required=False,
default=('http://testresults.opnfv.org'
- '/test/swagger/spec'),
+ '/test/swagger/APIs'),
help='API Declaration Spec File')
parser.add_argument('-o', '--output-directory',
required=True,
diff --git a/utils/test/testapi/opnfv_testapi/common/config.py b/utils/test/testapi/opnfv_testapi/common/config.py
index 46765ffd1..f73c0abf2 100644
--- a/utils/test/testapi/opnfv_testapi/common/config.py
+++ b/utils/test/testapi/opnfv_testapi/common/config.py
@@ -17,6 +17,7 @@ class Config(object):
def __init__(self):
self.file = self.CONFIG if self.CONFIG else self._default_config()
self._parse()
+ self._parse_per_page()
self.static_path = os.path.join(
os.path.dirname(os.path.normpath(__file__)),
os.pardir,
@@ -37,6 +38,10 @@ class Config(object):
[setattr(self, '{}_{}'.format(section, k), self._parse_value(v))
for k, v in config.items(section)]
+ def _parse_per_page(self):
+ if not hasattr(self, 'api_results_per_page'):
+ self.api_results_per_page = 20
+
@staticmethod
def _parse_value(value):
try:
diff --git a/utils/test/testapi/opnfv_testapi/resources/handlers.py b/utils/test/testapi/opnfv_testapi/resources/handlers.py
index 2fc31ca45..0234c8a73 100644
--- a/utils/test/testapi/opnfv_testapi/resources/handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/handlers.py
@@ -104,17 +104,50 @@ class GenericApiHandler(web.RequestHandler):
if query is None:
query = {}
data = []
+ sort = kwargs.get('sort')
+ page = kwargs.get('page', 0)
+ last = kwargs.get('last', 0)
+ per_page = kwargs.get('per_page', 0)
+
cursor = self._eval_db(self.table, 'find', query)
- if 'sort' in kwargs:
- cursor = cursor.sort(kwargs.get('sort'))
- if 'last' in kwargs:
- cursor = cursor.limit(kwargs.get('last'))
+ records_count = yield cursor.count()
+ records_nr = records_count
+ if (records_count > last) and (last > 0):
+ records_nr = last
+
+ pipelines = list()
+ if query:
+ pipelines.append({'$match': query})
+ if sort:
+ pipelines.append({'$sort': sort})
+
+ if page > 0:
+ total_pages, remainder = divmod(records_nr, per_page)
+ if remainder > 0:
+ total_pages += 1
+ pipelines.append({'$skip': (page - 1) * per_page})
+ pipelines.append({'$limit': per_page})
+ else:
+ pipelines.append({'$limit': records_nr})
+
+ cursor = self._eval_db(self.table,
+ 'aggregate',
+ pipelines,
+ allowDiskUse=True)
+
while (yield cursor.fetch_next):
data.append(self.format_data(cursor.next_object()))
if res_op is None:
res = {self.table: data}
else:
res = res_op(data, *args)
+ if page:
+ res.update({
+ 'pagination': {
+ 'current_page': page,
+ 'total_pages': total_pages
+ }
+ })
self.finish_request(res)
@web.asynchronous
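
The handler now pushes paging into a MongoDB aggregation pipeline: $match, then $sort, then either $skip/$limit for a requested page or a plain $limit honoring last. The page math reduces to a divmod; a minimal sketch of the stage construction:

    def paging_pipeline(records_nr, page=0, per_page=20, query=None, sort=None):
        """Build the $skip/$limit stages the way _list() above does (sketch)."""
        pipelines = []
        if query:
            pipelines.append({'$match': query})
        if sort:
            pipelines.append({'$sort': sort})
        if page > 0:
            total_pages, remainder = divmod(records_nr, per_page)
            if remainder > 0:
                total_pages += 1
            pipelines.append({'$skip': (page - 1) * per_page})
            pipelines.append({'$limit': per_page})
        else:
            total_pages = 0
            pipelines.append({'$limit': records_nr})
        return pipelines, total_pages

    # 45 records at 20 per page -> 3 pages; page 2 skips the first 20.
    print(paging_pipeline(45, page=2))
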
diff --git a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
index 824a89e58..1773216c0 100644
--- a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
@@ -11,12 +11,15 @@ from datetime import timedelta
from bson import objectid
+from opnfv_testapi.common import config
from opnfv_testapi.common import message
from opnfv_testapi.common import raises
from opnfv_testapi.resources import handlers
from opnfv_testapi.resources import result_models
from opnfv_testapi.tornado_swagger import swagger
+CONF = config.Config()
+
class GenericResultHandler(handlers.GenericApiHandler):
def __init__(self, application, request, **kwargs):
@@ -135,22 +138,28 @@ class ResultsCLHandler(GenericResultHandler):
@type last: L{string}
@in last: query
@required last: False
+ @param page: which page to list
+ @type page: L{int}
+ @in page: query
+ @required page: False
@param trust_indicator: must be float
@type trust_indicator: L{float}
@in trust_indicator: query
@required trust_indicator: False
"""
+ limitations = {'sort': {'start_date': -1}}
last = self.get_query_argument('last', 0)
if last is not None:
last = self.get_int('last', last)
+ limitations.update({'last': last})
- page = self.get_query_argument('page', 0)
- if page:
- last = 20
+ page = self.get_query_argument('page', None)
+ if page is not None:
+ page = self.get_int('page', page)
+ limitations.update({'page': page,
+ 'per_page': CONF.api_results_per_page})
- self._list(query=self.set_query(),
- sort=[('start_date', -1)],
- last=last)
+ self._list(query=self.set_query(), **limitations)
@swagger.operation(nickname="createTestResult")
def post(self):
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py b/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py
index ef74a0857..adaf6f7c3 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py
@@ -20,38 +20,52 @@ def thread_execute(method, *args, **kwargs):
class MemCursor(object):
def __init__(self, collection):
self.collection = collection
- self.count = len(self.collection)
+ self.length = len(self.collection)
self.sorted = []
def _is_next_exist(self):
- return self.count != 0
+ return self.length != 0
@property
def fetch_next(self):
return thread_execute(self._is_next_exist)
def next_object(self):
- self.count -= 1
+ self.length -= 1
return self.collection.pop()
def sort(self, key_or_list):
- key = key_or_list[0][0]
- if key_or_list[0][1] == -1:
- reverse = True
- else:
- reverse = False
+ for k, v in key_or_list.iteritems():
+ if v == -1:
+ reverse = True
+ else:
+ reverse = False
- if key_or_list is not None:
self.collection = sorted(self.collection,
- key=itemgetter(key), reverse=reverse)
+ key=itemgetter(k), reverse=reverse)
return self
def limit(self, limit):
if limit != 0 and limit < len(self.collection):
- self.collection = self.collection[0:limit]
- self.count = limit
+ self.collection = self.collection[0: limit]
+ self.length = limit
+ return self
+
+ def skip(self, skip):
+ if skip < self.length and (skip > 0):
+ self.collection = self.collection[self.length - skip: -1]
+ self.length -= skip
+ elif skip >= self.length:
+ self.collection = []
+ self.length = 0
return self
+ def _count(self):
+ return self.length
+
+ def count(self):
+ return thread_execute(self._count)
+
class MemDb(object):
@@ -187,6 +201,27 @@ class MemDb(object):
def find(self, *args):
return MemCursor(self._find(*args))
+ def _aggregate(self, *args, **kwargs):
+ res = self.contents
+ print args
+ for arg in args[0]:
+ for k, v in arg.iteritems():
+ if k == '$match':
+ res = self._find(v)
+ cursor = MemCursor(res)
+ for arg in args[0]:
+ for k, v in arg.iteritems():
+ if k == '$sort':
+ cursor = cursor.sort(v)
+ elif k == '$skip':
+ cursor = cursor.skip(v)
+ elif k == '$limit':
+ cursor = cursor.limit(v)
+ return cursor
+
+ def aggregate(self, *args, **kwargs):
+ return self._aggregate(*args, **kwargs)
+
def _update(self, spec, document, check_keys=True):
updated = False
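
The fake cursor gains count(), skip(), and limit() so the aggregation path above can be unit-tested without MongoDB. Conceptually the two new stages are just list slicing; a small illustrative sketch, independent of the MemCursor internals:

    def apply_skip_limit(records, skip=0, limit=0):
        """List-slicing equivalent of the $skip/$limit stages (sketch)."""
        if skip > 0:
            records = records[skip:]
        if limit > 0:
            records = records[:limit]
        return records

    print(apply_skip_limit(list(range(1, 8)), skip=2, limit=3))  # [3, 4, 5]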