Diffstat (limited to 'jjb')
-rwxr-xr-x  jjb/3rd_party_ci/download-netvirt-artifact.sh | 13
-rw-r--r--  jjb/3rd_party_ci/odl-netvirt.yml | 9
-rwxr-xr-x  jjb/apex/apex-iso-verify.sh | 104
-rw-r--r--  jjb/apex/apex-snapshot-deploy.sh | 1
-rwxr-xr-x  jjb/apex/apex-upload-artifact.sh | 81
-rw-r--r--  jjb/apex/apex.yml | 82
-rw-r--r--  jjb/armband/armband-ci-jobs.yml | 125
-rwxr-xr-x  jjb/armband/armband-deploy.sh | 4
-rw-r--r--  jjb/bottlenecks/bottlenecks-project-jobs.yml | 10
-rw-r--r--  jjb/compass4nfv/compass-ci-jobs.yml | 2
-rw-r--r--  jjb/compass4nfv/compass-dovetail-jobs.yml | 2
-rw-r--r--  jjb/compass4nfv/compass-project-jobs.yml | 2
-rw-r--r--  jjb/compass4nfv/compass-verify-jobs.yml | 2
-rw-r--r--  jjb/cperf/cperf-ci-jobs.yml | 2
-rwxr-xr-x  jjb/daisy4nfv/daisy4nfv-basic.sh | 1
-rwxr-xr-x  jjb/daisy4nfv/daisy4nfv-build.sh | 4
-rwxr-xr-x  jjb/daisy4nfv/daisy4nfv-download-artifact.sh | 10
-rw-r--r--  jjb/doctor/doctor.yml | 2
-rw-r--r--  jjb/dovetail/dovetail-ci-jobs.yml | 31
-rwxr-xr-x  jjb/dovetail/dovetail-run.sh | 55
-rw-r--r--  jjb/dovetail/dovetail-weekly-jobs.yml | 1
-rw-r--r--  jjb/fuel/fuel-daily-jobs.yml | 102
-rw-r--r--  jjb/functest/functest-daily-jobs.yml | 13
-rwxr-xr-x  jjb/functest/functest-loop.sh | 12
-rwxr-xr-x  jjb/functest/set-functest-env.sh | 19
-rw-r--r--  jjb/global/slave-params.yml | 24
-rw-r--r--  jjb/joid/joid-daily-jobs.yml | 2
-rw-r--r--  jjb/kvmfornfv/kvmfornfv.yml | 2
-rw-r--r--  jjb/opera/opera-daily-jobs.yml | 105
-rw-r--r--  jjb/releng/opnfv-docker-arm.yml | 8
-rw-r--r--  jjb/releng/opnfv-docker.sh | 14
-rwxr-xr-x  jjb/securedlab/check-jinja2.sh | 9
-rw-r--r--  jjb/securedlab/check-jinja2.yml | 80
-rwxr-xr-x  jjb/xci/bifrost-provision.sh | 4
-rwxr-xr-x  jjb/xci/bifrost-verify.sh | 6
-rw-r--r--  jjb/yardstick/yardstick-ci-jobs.yml | 17
36 files changed, 753 insertions, 207 deletions
diff --git a/jjb/3rd_party_ci/download-netvirt-artifact.sh b/jjb/3rd_party_ci/download-netvirt-artifact.sh
index 6aea01d2a..7ecf8d78d 100755
--- a/jjb/3rd_party_ci/download-netvirt-artifact.sh
+++ b/jjb/3rd_party_ci/download-netvirt-artifact.sh
@@ -6,11 +6,18 @@ set -o pipefail
ODL_ZIP=distribution-karaf-0.6.0-SNAPSHOT.zip
echo "Attempting to fetch the artifact location from ODL Jenkins"
-CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~master~$GERRIT_CHANGE_ID/detail"
+if [ "$ODL_BRANCH" != 'master' ]; then
+ DIST=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\2#p')
+ ODL_BRANCH=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\1%2F\2#p')
+else
+ DIST='nitrogen'
+fi
+CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~${ODL_BRANCH}~${GERRIT_CHANGE_ID}/detail"
# due to limitation with the Jenkins Gerrit Trigger, we need to use Gerrit REST API to get the change details
-ODL_BUILD_JOB_NUM=$(curl -s $CHANGE_DETAILS_URL | grep -Eo 'netvirt-distribution-check-carbon/[0-9]+' | tail -1 | grep -Eo [0-9]+)
+ODL_BUILD_JOB_NUM=$(curl --fail -s ${CHANGE_DETAILS_URL} | grep -Eo "netvirt-distribution-check-${DIST}/[0-9]+" | tail -1 | grep -Eo [0-9]+)
+DISTRO_CHECK_CONSOLE_LOG="https://logs.opendaylight.org/releng/jenkins092/netvirt-distribution-check-${DIST}/${ODL_BUILD_JOB_NUM}/console.log.gz"
+NETVIRT_ARTIFACT_URL=$(curl --fail -s --compressed ${DISTRO_CHECK_CONSOLE_LOG} | grep 'BUNDLE_URL' | cut -d = -f 2)
-NETVIRT_ARTIFACT_URL="https://jenkins.opendaylight.org/releng/job/netvirt-distribution-check-carbon/${ODL_BUILD_JOB_NUM}/artifact/${ODL_ZIP}"
echo -e "URL to artifact is\n\t$NETVIRT_ARTIFACT_URL"
echo "Downloading the artifact. This could take time..."
diff --git a/jjb/3rd_party_ci/odl-netvirt.yml b/jjb/3rd_party_ci/odl-netvirt.yml
index 470e4335e..a937acbed 100644
--- a/jjb/3rd_party_ci/odl-netvirt.yml
+++ b/jjb/3rd_party_ci/odl-netvirt.yml
@@ -12,6 +12,10 @@
branch: '{stream}'
gs-pathname: ''
disabled: false
+ - carbon:
+ branch: 'stable/carbon'
+ gs-pathname: ''
+ disabled: false
#####################################
# patch verification phases
#####################################
@@ -111,6 +115,7 @@
- name: 'odl-netvirt-verify-virtual-install-netvirt-{stream}'
current-parameters: false
predefined-parameters: |
+ ODL_BRANCH={branch}
BRANCH=$BRANCH
GERRIT_REFSPEC=$GERRIT_REFSPEC
GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
@@ -125,10 +130,10 @@
name: functest
condition: SUCCESSFUL
projects:
- - name: 'functest-netvirt-virtual-suite-{stream}'
+ - name: 'functest-netvirt-virtual-suite-master'
predefined-parameters: |
DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
- FUNCTEST_SUITE_NAME=tempest_smoke_serial
+ FUNCTEST_SUITE_NAME=odl_netvirt
RC_FILE_PATH=$HOME/cloner-info/overcloudrc
node-parameters: true
kill-phase-on: FAILURE
diff --git a/jjb/apex/apex-iso-verify.sh b/jjb/apex/apex-iso-verify.sh
new file mode 100755
index 000000000..cdeac04d7
--- /dev/null
+++ b/jjb/apex/apex-iso-verify.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# log info to console
+echo "Starting the Apex iso verify."
+echo "--------------------------------------------------------"
+echo
+
+BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
+
+source $BUILD_DIRECTORY/../opnfv.properties
+
+if ! rpm -q virt-install > /dev/null; then
+ sudo yum -y install virt-install
+fi
+
+# define a clean function
+rm_apex_iso_verify () {
+if sudo virsh list --all | grep apex-iso-verify | grep running; then
+ sudo virsh destroy apex-iso-verify
+fi
+if sudo virsh list --all | grep apex-iso-verify; then
+ sudo virsh undefine apex-iso-verify
+fi
+}
+
+# Make sure a pre-existing iso-verify isn't there
+rm_apex_iso_verify
+
+# run an install from the iso
+# This streams a serial console to tcp port 3737 on localhost
+sudo virt-install -n apex-iso-verify -r 4096 --vcpus 4 --os-variant=rhel7 \
+ --accelerate -v --noautoconsole --nographics \
+ --disk path=/var/lib/libvirt/images/apex-iso-verify.qcow2,size=30,format=qcow2 \
+ -l $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso \
+ --extra-args 'console=ttyS0 console=ttyS0,115200n8 serial inst.ks=file:/iso-verify.ks inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64:/' \
+ --initrd-inject $BUILD_DIRECTORY/../ci/iso-verify.ks \
+ --serial tcp,host=:3737,protocol=raw
+
+# Attach to tcp port 3737 and echo the output to stdout
+# watch for a 5 min time out, a power off message or a tcp disconnect
+python << EOP
+#!/usr/bin/env python
+
+import sys
+import socket
+from time import sleep
+from time import time
+
+
+TCP_IP = '127.0.0.1'
+TCP_PORT = 3737
+BUFFER_SIZE = 1024
+
+try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((TCP_IP, TCP_PORT))
+except Exception, e:
+ print "Failed to connect to the iso-verofy vm's serial console"
+ print "this probably means that the VM failed to start"
+ raise e
+
+activity = time()
+data = s.recv(BUFFER_SIZE)
+last_data = data
+while time() - activity < 300:
+ try:
+ if data != last_data:
+ activity = time()
+ last_data = data
+ data = s.recv(BUFFER_SIZE)
+ sys.stdout.write(data)
+ if 'Powering off' in data:
+ break
+ sleep(.5)
+ except socket.error, e:
+ # for now assuming that the connection was closed
+ # which is good, means the vm finished installing
+ # printing the error output just in case we need to debug
+ print "VM console connection lost: %s" % msg
+ break
+s.close()
+
+if time() - activity > 300:
+ print "failing due to console inactivity"
+ exit(1)
+else:
+ print "Success!"
+EOP
+
+# save the python return code for after cleanup
+python_rc=$?
+
+# clean up
+rm_apex_iso_verify
+
+# Exit with the RC of the Python job
+exit $python_rc
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
diff --git a/jjb/apex/apex-snapshot-deploy.sh b/jjb/apex/apex-snapshot-deploy.sh
index 06c002319..3eb3cf23a 100644
--- a/jjb/apex/apex-snapshot-deploy.sh
+++ b/jjb/apex/apex-snapshot-deploy.sh
@@ -129,6 +129,7 @@ if [ -z "$virsh_vm_defs" ]; then
fi
for node_def in ${virsh_vm_defs}; do
+ sed -ri "s/machine='[^\s]+'/machine='pc'/" ${node_def}
sudo virsh define ${node_def}
node=$(echo ${node_def} | awk -F '.' '{print $1}')
sudo cp -f ${node}.qcow2 /var/lib/libvirt/images/
diff --git a/jjb/apex/apex-upload-artifact.sh b/jjb/apex/apex-upload-artifact.sh
index c2de7d70d..d046c119d 100755
--- a/jjb/apex/apex-upload-artifact.sh
+++ b/jjb/apex/apex-upload-artifact.sh
@@ -3,8 +3,13 @@ set -o errexit
set -o nounset
set -o pipefail
+if [ -z "$ARTIFACT_TYPE" ]; then
+ echo "ERROR: ARTIFACT_TYPE not provided...exiting"
+ exit 1
+fi
+
# log info to console
-echo "Uploading the Apex artifact. This could take some time..."
+echo "Uploading the Apex ${ARTIFACT_TYPE} artifact. This could take some time..."
echo "--------------------------------------------------------"
echo
@@ -18,7 +23,7 @@ echo "Cloning releng repository..."
[ -d releng ] && rm -rf releng
git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null
#this is where we import the siging key
-if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
+if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
source $WORKSPACE/releng/utils/gpg_import_key.sh
fi
@@ -45,32 +50,18 @@ echo "ISO signature Upload Complete!"
}
uploadiso () {
-# upload artifact and additional files to google storage
-gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log
-echo "ISO Upload Complete!"
-RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
-RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
-VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud; do # removed onos for danube
- RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-done
-SRPM_INSTALL_PATH=$BUILD_DIRECTORY
-SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
-VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud; do # removed onos for danube
- SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-done
+ gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log
+ echo "ISO Upload Complete!"
}
uploadrpm () {
-#This is where we upload the rpms
-for artifact in $RPM_LIST $SRPM_LIST; do
- echo "Uploading artifact: ${artifact}"
- gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log
- echo "Upload complete for ${artifact}"
-done
-gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log
-gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
+ for artifact in $RPM_LIST $SRPM_LIST; do
+ echo "Uploading artifact: ${artifact}"
+ gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log
+ echo "Upload complete for ${artifact}"
+ done
+ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log
+ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
}
uploadsnap () {
@@ -84,21 +75,43 @@ uploadsnap () {
echo "Upload complete for Snapshot"
}
-if echo $WORKSPACE | grep promote > /dev/null; then
- uploadsnap
-elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
+if gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
echo "Signing Key avaliable"
- signiso
+ SIGN_ARTIFACT="true"
+fi
+
+if [ "$ARTIFACT_TYPE" == 'snapshot' ]; then
+ uploadsnap
+elif [ "$ARTIFACT_TYPE" == 'iso' ]; then
+ if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
+ signiso
+ fi
uploadiso
- signrpm
+elif [ "$ARTIFACT_TYPE" == 'rpm' ]; then
+ RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
+ RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
+ VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
+ for pkg in common undercloud; do # removed onos for danube
+ RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
+ done
+ SRPM_INSTALL_PATH=$BUILD_DIRECTORY
+ SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
+ VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
+ for pkg in common undercloud; do # removed onos for danube
+ SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
+ done
+
+ if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
+ signrpm
+ fi
uploadrpm
else
- uploadiso
- uploadrpm
+ echo "ERROR: Unknown artifact type ${ARTIFACT_TYPE} to upload...exiting"
+ exit 1
fi
echo
echo "--------------------------------------------------------"
echo "Done!"
-echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"
+if [ "$ARTIFACT_TYPE" == 'iso' ]; then echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"; fi
+if [ "$ARTIFACT_TYPE" == 'rpm' ]; then echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"; fi
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index e7982ba55..7ca2e6edd 100644
--- a/jjb/apex/apex.yml
+++ b/jjb/apex/apex.yml
@@ -12,6 +12,7 @@
- 'apex-daily-{stream}'
- 'apex-csit-promote-daily-{stream}'
- 'apex-fdio-promote-daily-{stream}'
+ - 'apex-verify-iso-{stream}'
# stream: branch with - in place of / (eg. stable-arno)
# branch: branch (eg. stable/arno)
@@ -443,8 +444,64 @@
git-revision: false
same-node: true
block: true
+ - inject:
+ properties-content: ARTIFACT_TYPE=rpm
+ - 'apex-upload-artifact'
+ - trigger-builds:
+ - project: 'apex-verify-iso-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ git-revision: false
+ block: true
+ same-node: true
+ - inject:
+ properties-content: ARTIFACT_TYPE=iso
- 'apex-upload-artifact'
+# ISO verify job
+- job-template:
+ name: 'apex-verify-iso-{stream}'
+
+ # Job template for builds
+ #
+ # Required Variables:
+ # stream: branch with - in place of / (eg. stable)
+ # branch: branch (eg. stable)
+ node: '{daily-slave}'
+
+ disabled: false
+
+ concurrent: true
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - apex-parameter:
+ gs-pathname: '{gs-pathname}'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: "Used for overriding the GIT URL coming from parameters macro."
+
+ scm:
+ - git-scm
+
+ properties:
+ - logrotate-default
+ - build-blocker:
+ use-build-blocker: true
+ block-level: 'NODE'
+ blocking-jobs:
+ - 'apex-deploy.*'
+ - throttle:
+ max-per-node: 1
+ max-total: 10
+ option: 'project'
+
+ builders:
+ - 'apex-iso-verify'
+
- job-template:
name: 'apex-deploy-virtual-{scenario}-{stream}'
@@ -616,7 +673,7 @@
# 4.not used for release criteria or compliance,
# only to debug the dovetail tool bugs with apex
#- trigger-builds:
- # - project: 'dovetail-apex-{slave}-debug-{stream}'
+ # - project: 'dovetail-apex-{slave}-proposed_tests-{stream}'
# current-parameters: false
# predefined-parameters:
# DEPLOY_SCENARIO=os-nosdn-nofeature-ha
@@ -807,7 +864,7 @@
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
- trigger-builds:
- - project: 'apex-deploy-baremetal-os-odl_l3-fdio-noha-{stream}'
+ - project: 'apex-deploy-baremetal-os-odl_l3-fdio-ha-{stream}'
predefined-parameters: |
BUILD_DIRECTORY=apex-build-{stream}/.build
OPNFV_CLEAN=yes
@@ -819,7 +876,7 @@
- trigger-builds:
- project: 'functest-apex-{daily-slave}-daily-{stream}'
predefined-parameters:
- DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+ DEPLOY_SCENARIO=os-odl_l3-fdio-ha
block: true
same-node: true
block-thresholds:
@@ -829,7 +886,7 @@
- trigger-builds:
- project: 'yardstick-apex-{slave}-daily-{stream}'
predefined-parameters:
- DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+ DEPLOY_SCENARIO=os-odl_l3-fdio-ha
block: true
same-node: true
block-thresholds:
@@ -1013,8 +1070,9 @@
same-node: true
- shell:
!include-raw-escape: ./apex-snapshot-create.sh
- - shell:
- !include-raw-escape: ./apex-upload-artifact.sh
+ - inject:
+ properties-content: ARTIFACT_TYPE=snapshot
+ - 'apex-upload-artifact'
# FDIO promote
- job-template:
@@ -1062,8 +1120,9 @@
same-node: true
- shell:
!include-raw-escape: ./apex-snapshot-create.sh
- - shell:
- !include-raw-escape: ./apex-upload-artifact.sh
+ - inject:
+ properties-content: ARTIFACT_TYPE=snapshot
+ - 'apex-upload-artifact'
- job-template:
name: 'apex-gs-clean-{stream}'
@@ -1147,6 +1206,13 @@
!include-raw: ./apex-workspace-cleanup.sh
- builder:
+ name: 'apex-iso-verify'
+ builders:
+ - shell:
+ !include-raw: ./apex-iso-verify.sh
+
+
+- builder:
name: 'apex-upload-artifact'
builders:
- shell:
diff --git a/jjb/armband/armband-ci-jobs.yml b/jjb/armband/armband-ci-jobs.yml
index 38a729de6..17d520419 100644
--- a/jjb/armband/armband-ci-jobs.yml
+++ b/jjb/armband/armband-ci-jobs.yml
@@ -56,8 +56,12 @@
slave-label: arm-pod3
installer: fuel
<<: *danube
- - arm-pod3-2:
- slave-label: arm-pod3-2
+ - arm-pod4:
+ slave-label: arm-pod4
+ installer: fuel
+ <<: *danube
+ - arm-virtual1:
+ slave-label: arm-virtual1
installer: fuel
<<: *danube
#--------------------------------
@@ -71,8 +75,12 @@
slave-label: arm-pod3
installer: fuel
<<: *master
- - arm-pod3-2:
- slave-label: arm-pod3-2
+ - arm-pod4:
+ slave-label: arm-pod4
+ installer: fuel
+ <<: *master
+ - arm-virtual1:
+ slave-label: arm-virtual1
installer: fuel
<<: *master
#--------------------------------
@@ -181,7 +189,7 @@
# 4.not used for release criteria or compliance,
# only to debug the dovetail tool bugs with arm pods
- trigger-builds:
- - project: 'dovetail-{installer}-{pod}-debug-{stream}'
+ - project: 'dovetail-{installer}-{pod}-proposed_tests-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
@@ -333,31 +341,31 @@
- trigger:
name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 1'
+ - timed: ''
- trigger:
name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 2'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 3'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 4'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 5'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-sfc-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 6'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 7'
+ - timed: ''
#--------------------------------------------------------------------
# Enea Armband CI Virtual Triggers running against danube branch
#--------------------------------------------------------------------
@@ -389,6 +397,71 @@
name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-danube-trigger'
triggers:
- timed: ''
+
+#--------------------------------------------------------------------
+# Enea Armband Non CI Virtual Triggers running against danube branch
+#--------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+
+#--------------------------------------------------------------------
+# Enea Armband Non CI Virtual Triggers running against master branch
+#--------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+
#----------------------------------------------------------
# Enea Armband POD 2 Triggers running against master branch
#----------------------------------------------------------
@@ -517,61 +590,61 @@
# Enea Armband POD 3 Triggers running against master branch (aarch64 slave)
#--------------------------------------------------------------------------
- trigger:
- name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod4-master-trigger'
triggers:
- timed: ''
#--------------------------------------------------------------------------
# Enea Armband POD 3 Triggers running against danube branch (aarch64 slave)
#--------------------------------------------------------------------------
- trigger:
- name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod4-danube-trigger'
triggers:
- timed: ''
diff --git a/jjb/armband/armband-deploy.sh b/jjb/armband/armband-deploy.sh
index 2e5aa3924..e445e0850 100755
--- a/jjb/armband/armband-deploy.sh
+++ b/jjb/armband/armband-deploy.sh
@@ -33,10 +33,10 @@ fi
# set deployment parameters
export TMPDIR=${WORKSPACE}/tmpdir
-# arm-pod3-2 is an aarch64 jenkins slave for the same POD as the
+# arm-pod4 is an aarch64 jenkins slave for the same POD as the
# x86 jenkins slave arm-pod3; therefore we use the same pod name
# to deploy the pod from both jenkins slaves
-if [[ "${NODE_NAME}" == "arm-pod3-2" ]]; then
+if [[ "${NODE_NAME}" == "arm-pod4" ]]; then
NODE_NAME="arm-pod3"
fi
diff --git a/jjb/bottlenecks/bottlenecks-project-jobs.yml b/jjb/bottlenecks/bottlenecks-project-jobs.yml
index a0abb9331..5dced2aad 100644
--- a/jjb/bottlenecks/bottlenecks-project-jobs.yml
+++ b/jjb/bottlenecks/bottlenecks-project-jobs.yml
@@ -70,8 +70,8 @@
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
builders:
- - bottlenecks-hello
- #- bottlenecks-unit-tests
+ #- bottlenecks-hello
+ - bottlenecks-unit-tests
- job-template:
name: 'bottlenecks-merge-{stream}'
@@ -206,10 +206,10 @@
# install python packages
easy_install -U setuptools
easy_install -U pip
- pip install -r requirements.txt
+ pip install -r $WORKSPACE/requirements/verify.txt
# unit tests
- /bin/bash $WORKSPACE/tests.sh
+ /bin/bash $WORKSPACE/verify.sh
deactivate
@@ -220,4 +220,4 @@
#!/bin/bash
set -o errexit
- echo "hello"
+ echo -e "Wellcome to Bottlenecks! \nMerge event is planning to support more functions! "
diff --git a/jjb/compass4nfv/compass-ci-jobs.yml b/jjb/compass4nfv/compass-ci-jobs.yml
index 237f8944d..61845acdf 100644
--- a/jjb/compass4nfv/compass-ci-jobs.yml
+++ b/jjb/compass4nfv/compass-ci-jobs.yml
@@ -160,7 +160,7 @@
#dovetail only master by now, not sync with A/B/C branches
#here the stream means the SUT stream, dovetail stream is defined in its own job
- trigger-builds:
- - project: 'dovetail-compass-{pod}-debug-{stream}'
+ - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
diff --git a/jjb/compass4nfv/compass-dovetail-jobs.yml b/jjb/compass4nfv/compass-dovetail-jobs.yml
index 30c80e648..c321655d7 100644
--- a/jjb/compass4nfv/compass-dovetail-jobs.yml
+++ b/jjb/compass4nfv/compass-dovetail-jobs.yml
@@ -98,7 +98,7 @@
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
- trigger-builds:
- - project: 'dovetail-compass-{pod}-debug-weekly-{stream}'
+ - project: 'dovetail-compass-{pod}-proposed_tests-weekly-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
diff --git a/jjb/compass4nfv/compass-project-jobs.yml b/jjb/compass4nfv/compass-project-jobs.yml
index f962518e0..59482459e 100644
--- a/jjb/compass4nfv/compass-project-jobs.yml
+++ b/jjb/compass4nfv/compass-project-jobs.yml
@@ -125,7 +125,7 @@
description: "URL to Google Storage."
- string:
name: PPA_REPO
- default: "http://205.177.226.237:9999{ppa-pathname}"
+ default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
- string:
name: PPA_CACHE
default: "$WORKSPACE/work/repo/"
diff --git a/jjb/compass4nfv/compass-verify-jobs.yml b/jjb/compass4nfv/compass-verify-jobs.yml
index 14279e649..56f54d838 100644
--- a/jjb/compass4nfv/compass-verify-jobs.yml
+++ b/jjb/compass4nfv/compass-verify-jobs.yml
@@ -339,7 +339,7 @@
description: "URL to Google Storage."
- string:
name: PPA_REPO
- default: "http://205.177.226.237:9999{ppa-pathname}"
+ default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
- string:
name: PPA_CACHE
default: "$WORKSPACE/work/repo/"
diff --git a/jjb/cperf/cperf-ci-jobs.yml b/jjb/cperf/cperf-ci-jobs.yml
index f6e068530..dc209d644 100644
--- a/jjb/cperf/cperf-ci-jobs.yml
+++ b/jjb/cperf/cperf-ci-jobs.yml
@@ -162,7 +162,7 @@
-v of_port:6653"
robot_suite="/home/opnfv/repos/odl_test/csit/suites/openflowplugin/Performance/010_Cbench.robot"
- docker run -ti -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
+ docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
- builder:
name: cperf-cleanup
diff --git a/jjb/daisy4nfv/daisy4nfv-basic.sh b/jjb/daisy4nfv/daisy4nfv-basic.sh
index 04b9b7bfa..87f5482e0 100755
--- a/jjb/daisy4nfv/daisy4nfv-basic.sh
+++ b/jjb/daisy4nfv/daisy4nfv-basic.sh
@@ -4,4 +4,3 @@ echo "--------------------------------------------------------"
echo "This is diasy4nfv basic job!"
echo "--------------------------------------------------------"
-sudo rm -rf /home/jenkins-ci/opnfv/slave_root/workspace/daisy4nfv-verify-build-master/*
diff --git a/jjb/daisy4nfv/daisy4nfv-build.sh b/jjb/daisy4nfv/daisy4nfv-build.sh
index 375d80733..925f68e18 100755
--- a/jjb/daisy4nfv/daisy4nfv-build.sh
+++ b/jjb/daisy4nfv/daisy4nfv-build.sh
@@ -1,5 +1,9 @@
#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
echo "--------------------------------------------------------"
echo "This is diasy4nfv build job!"
echo "--------------------------------------------------------"
diff --git a/jjb/daisy4nfv/daisy4nfv-download-artifact.sh b/jjb/daisy4nfv/daisy4nfv-download-artifact.sh
index 1cc0443ad..a64c80e5c 100755
--- a/jjb/daisy4nfv/daisy4nfv-download-artifact.sh
+++ b/jjb/daisy4nfv/daisy4nfv-download-artifact.sh
@@ -57,12 +57,18 @@ fi
# log info to console
echo "Downloading the $INSTALLER_TYPE artifact using URL http://$OPNFV_ARTIFACT_URL"
-echo "This could take some time..."
+echo "This could take some time... Now the time is $(date -u)"
echo "--------------------------------------------------------"
echo
# download the file
-curl -L -s -o $WORKSPACE/opnfv.bin http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
+if [[ "$NODE_NAME" =~ (zte) ]] && [ -x "$(command -v aria2c)" ]; then
+ DOWNLOAD_CMD="aria2c -x 3 --allow-overwrite=true -d $WORKSPACE -o opnfv.bin"
+else
+ DOWNLOAD_CMD="curl -L -s -o $WORKSPACE/opnfv.bin"
+fi
+
+$DOWNLOAD_CMD http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
# list the file
ls -al $WORKSPACE/opnfv.bin
diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index c677ef96e..807d436da 100644
--- a/jjb/doctor/doctor.yml
+++ b/jjb/doctor/doctor.yml
@@ -112,7 +112,7 @@
# functest-suite-parameter
- string:
name: FUNCTEST_SUITE_NAME
- default: '{project}'
+ default: 'doctor-notification'
- string:
name: TESTCASE_OPTIONS
default: '-e INSPECTOR_TYPE={inspector} -e PROFILER_TYPE={profiler} -v $WORKSPACE:/home/opnfv/repos/doctor'
diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml
index 869048088..682948d8b 100644
--- a/jjb/dovetail/dovetail-ci-jobs.yml
+++ b/jjb/dovetail/dovetail-ci-jobs.yml
@@ -137,10 +137,41 @@
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
+ - arm-virtual1:
+ slave-label: '{pod}'
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - zte-pod1:
+ slave-label: zte-pod1
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - zte-pod2:
+ slave-label: zte-pod2
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - zte-pod3:
+ slave-label: zte-pod3
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - zte-pod1:
+ slave-label: zte-pod1
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *danube
+ - zte-pod3:
+ slave-label: zte-pod3
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *danube
#--------------------------------
testsuite:
- 'debug'
- 'compliance_set'
+ - 'proposed_tests'
jobs:
- 'dovetail-{SUT}-{pod}-{testsuite}-{stream}'
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index 5161a3c7c..cee9e5929 100755
--- a/jjb/dovetail/dovetail-run.sh
+++ b/jjb/dovetail/dovetail-run.sh
@@ -32,10 +32,11 @@ if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FOR
sudo iptables -I FORWARD -j RETURN
fi
+releng_repo=${WORKSPACE}/releng
+[ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
+git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
+
if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
- releng_repo=${WORKSPACE}/releng
- [ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
- git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
fi
@@ -47,16 +48,62 @@ else
exit 1
fi
+sudo pip install virtualenv
+
+cd ${releng_repo}/modules
+sudo virtualenv venv
+source venv/bin/activate
+sudo pip install -e ./ >/dev/null
+
+if [[ ${INSTALLER_TYPE} == compass ]]; then
+ options="-u root -p root"
+elif [[ ${INSTALLER_TYPE} == fuel ]]; then
+ options="-u root -p r00tme"
+else
+ echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
+ echo "HA test cases may not run properly."
+fi
+
+pod_file_dir="/home/opnfv/dovetail/userconfig"
+if [ -d ${pod_file_dir} ]; then
+ sudo rm -r ${pod_file_dir}/*
+else
+ sudo mkdir -p ${pod_file_dir}
+fi
+cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} -i ${INSTALLER_IP} ${options} -f ${pod_file_dir}/pod.yaml"
+echo ${cmd}
+${cmd}
+
+deactivate
+
+cd ${WORKSPACE}
+
+if [ -f ${pod_file_dir}/pod.yaml ]; then
+ echo "file ${pod_file_dir}/pod.yaml:"
+ cat ${pod_file_dir}/pod.yaml
+else
+ echo "Error: There doesn't exist file ${pod_file_dir}/pod.yaml."
+ echo "HA test cases may not run properly."
+fi
+
+ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+
+if [ "$INSTALLER_TYPE" == "fuel" ]; then
+ echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+ sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${pod_file_dir}/id_rsa
+fi
+
opts="--privileged=true -id"
results_envs="-v /var/run/docker.sock:/var/run/docker.sock \
-v /home/opnfv/dovetail/results:/home/opnfv/dovetail/results"
openrc_volume="-v ${OPENRC}:${OPENRC}"
+userconfig_volume="-v ${pod_file_dir}:${pod_file_dir}"
# Pull the image with correct tag
echo "Dovetail: Pulling image opnfv/dovetail:${DOCKER_TAG}"
docker pull opnfv/dovetail:$DOCKER_TAG >$redirect
-cmd="docker run ${opts} ${results_envs} ${openrc_volume} \
+cmd="docker run ${opts} ${results_envs} ${openrc_volume} ${userconfig_volume} \
${sshkey} opnfv/dovetail:${DOCKER_TAG} /bin/bash"
echo "Dovetail: running docker run command: ${cmd}"
${cmd} >${redirect}
diff --git a/jjb/dovetail/dovetail-weekly-jobs.yml b/jjb/dovetail/dovetail-weekly-jobs.yml
index 915feb5e8..700657d68 100644
--- a/jjb/dovetail/dovetail-weekly-jobs.yml
+++ b/jjb/dovetail/dovetail-weekly-jobs.yml
@@ -46,6 +46,7 @@
testsuite:
- 'debug'
- 'compliance_set'
+ - 'proposed_tests'
loop:
- 'weekly':
diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml
index 32abad624..2fa868779 100644
--- a/jjb/fuel/fuel-daily-jobs.yml
+++ b/jjb/fuel/fuel-daily-jobs.yml
@@ -73,8 +73,8 @@
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-odl_l2-sfc-ha':
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- - 'os-odl_l2-bgpvpn-ha':
- auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+ # - 'os-odl_l2-bgpvpn-ha':
+ # auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-nosdn-kvm-ha':
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-nosdn-ovs-ha':
@@ -112,6 +112,7 @@
jobs:
- 'fuel-{scenario}-{pod}-daily-{stream}'
- 'fuel-deploy-{pod}-daily-{stream}'
+ - 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}'
########################
# job templates
@@ -195,6 +196,103 @@
recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
- job-template:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: false
+
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-total: 4
+ max-per-node: 1
+ option: 'project'
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - 'fuel-os-.*?-{pod}-daily-.*'
+ - 'fuel-os-.*?-{pod}-weekly-.*'
+ block-level: 'NODE'
+
+ wrappers:
+ - build-name:
+ name: '$BUILD_NUMBER - Scenario: os-odl_l2-bgpvpn-ha'
+
+ triggers:
+ - 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}-trigger'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - '{installer}-defaults'
+ - '{slave-label}-defaults':
+ installer: '{installer}'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: "os-odl_l2-bgpvpn-ha"
+ - fuel-ci-parameter:
+ gs-pathname: '{gs-pathname}'
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME"
+ - trigger-builds:
+ - project: 'fuel-deploy-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+ same-node: true
+ block: true
+ - trigger-builds:
+ - project: 'functest-fuel-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+ same-node: true
+ block: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-fuel-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ # 1.dovetail only master by now, not sync with A/B/C branches
+ # 2.here the stream means the SUT stream, dovetail stream is defined in its own job
+ # 3.only debug testsuite here(includes basic testcase,
+ # i.e. refstack ipv6 vpn test cases from functest, HA test case
+ # from yardstick)
+ # 4.not used for release criteria or compliance,
+ # only to debug the dovetail tool bugs with fuel bgpvpn scenario
+ - trigger-builds:
+ - project: 'dovetail-fuel-{pod}-proposed_tests-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+
+ publishers:
+ - email:
+ recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com matthew.lijun@huawei.com
+
+
+- job-template:
name: 'fuel-deploy-{pod}-daily-{stream}'
disabled: '{obj:disabled}'
diff --git a/jjb/functest/functest-daily-jobs.yml b/jjb/functest/functest-daily-jobs.yml
index e8d14321f..3c04a4ac0 100644
--- a/jjb/functest/functest-daily-jobs.yml
+++ b/jjb/functest/functest-daily-jobs.yml
@@ -158,7 +158,11 @@
slave-label: '{pod}'
installer: fuel
<<: *master
- - arm-pod3-2:
+ - arm-pod4:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *master
+ - arm-virtual1:
slave-label: '{pod}'
installer: fuel
<<: *master
@@ -190,7 +194,11 @@
slave-label: '{pod}'
installer: fuel
<<: *danube
- - arm-pod3-2:
+ - arm-pod4:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *danube
+ - arm-virtual1:
slave-label: '{pod}'
installer: fuel
<<: *danube
@@ -294,6 +302,7 @@
- 'vims'
- 'multisite'
- 'parser'
+ - 'opera_vims'
- string:
name: TESTCASE_OPTIONS
default: ''
diff --git a/jjb/functest/functest-loop.sh b/jjb/functest/functest-loop.sh
index 893c428a2..869c3956c 100755
--- a/jjb/functest/functest-loop.sh
+++ b/jjb/functest/functest-loop.sh
@@ -1,15 +1,9 @@
#!/bin/bash
set +e
-branch=${GIT_BRANCH##*/}
-[[ "$PUSH_RESULTS_TO_DB" == "true" ]] && flags+="-r"
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
- cmd="${FUNCTEST_REPO_DIR}/docker/run_tests.sh -s ${flags}"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
- cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t all ${flags}"
-else
- cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
-fi
+
+cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
+
container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
docker exec $container_id $cmd
diff --git a/jjb/functest/set-functest-env.sh b/jjb/functest/set-functest-env.sh
index 05e3d5792..1acf0a2ad 100755
--- a/jjb/functest/set-functest-env.sh
+++ b/jjb/functest/set-functest-env.sh
@@ -70,6 +70,15 @@ envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
-e NODE_NAME=${NODE_NAME} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO} \
-e BUILD_TAG=${BUILD_TAG} -e CI_DEBUG=${CI_DEBUG} -e DEPLOY_TYPE=${DEPLOY_TYPE}"
+if [[ ${INSTALLER_TYPE} == 'compass' && ${DEPLOY_SCENARIO} == *'os-nosdn-openo-ha'* ]]; then
+ ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+ openo_msb_port=${openo_msb_port:-80}
+ openo_msb_endpoint="$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \
+ 'mysql -ucompass -pcompass -Dcompass -e "select package_config from cluster;" \
+ | sed s/,/\\n/g | grep openo_ip | cut -d \" -f 4'):$openo_msb_port"
+
+ envs=${env}" -e OPENO_MSB_ENDPOINT=${openo_msb_endpoint}"
+fi
volumes="${results_vol} ${sshkey_vol} ${stackrc_vol} ${rc_file_vol}"
@@ -103,12 +112,8 @@ if [ $(docker ps | grep "${FUNCTEST_IMAGE}:${DOCKER_TAG}" | wc -l) == 0 ]; then
echo "The container ${FUNCTEST_IMAGE} with ID=${container_id} has not been properly started. Exiting..."
exit 1
fi
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
- cmd="${FUNCTEST_REPO_DIR}/docker/prepare_env.sh"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
- cmd="python ${FUNCTEST_REPO_DIR}/ci/prepare_env.py start"
-else
- cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
-fi
+
+cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
+
echo "Executing command inside the docker: ${cmd}"
docker exec ${container_id} ${cmd}
diff --git a/jjb/global/slave-params.yml b/jjb/global/slave-params.yml
index 1905a098a..fad06b077 100644
--- a/jjb/global/slave-params.yml
+++ b/jjb/global/slave-params.yml
@@ -747,15 +747,33 @@
default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
description: 'Base URI to the configuration directory'
- parameter:
- name: 'arm-pod3-2-defaults'
+ name: 'arm-pod4-defaults'
parameters:
- node:
name: SLAVE_NAME
description: 'Slave name on Jenkins'
allowed-slaves:
- - arm-pod3-2
+ - arm-pod4
default-slaves:
- - arm-pod3-2
+ - arm-pod4
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+- parameter:
+ name: 'arm-virtual1-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-virtual1
+ default-slaves:
+ - arm-virtual1
- string:
name: GIT_BASE
default: https://gerrit.opnfv.org/gerrit/$PROJECT
diff --git a/jjb/joid/joid-daily-jobs.yml b/jjb/joid/joid-daily-jobs.yml
index 7dc718950..13ea9b308 100644
--- a/jjb/joid/joid-daily-jobs.yml
+++ b/jjb/joid/joid-daily-jobs.yml
@@ -164,7 +164,7 @@
# 4.not used for release criteria or compliance,
# only to debug the dovetail tool bugs with joid
#- trigger-builds:
- # - project: 'dovetail-joid-{pod}-debug-{stream}'
+ # - project: 'dovetail-joid-{pod}-proposed_tests-{stream}'
# current-parameters: false
# predefined-parameters:
# DEPLOY_SCENARIO={scenario}
diff --git a/jjb/kvmfornfv/kvmfornfv.yml b/jjb/kvmfornfv/kvmfornfv.yml
index 8d607f985..9624778f8 100644
--- a/jjb/kvmfornfv/kvmfornfv.yml
+++ b/jjb/kvmfornfv/kvmfornfv.yml
@@ -11,7 +11,7 @@
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: false
+ disabled: true
#####################################
# patch verification phases
#####################################
diff --git a/jjb/opera/opera-daily-jobs.yml b/jjb/opera/opera-daily-jobs.yml
index 5d2cc03f3..596d3771f 100644
--- a/jjb/opera/opera-daily-jobs.yml
+++ b/jjb/opera/opera-daily-jobs.yml
@@ -6,30 +6,32 @@
#####################################
# branch definitions
#####################################
- stream:
- - master:
- branch: '{stream}'
- gs-pathname: ''
- disabled: false
+ master: &master
+ stream: master
+ branch: '{stream}'
+ gs-pathname: ''
+ disabled: false
#####################################
-# patch verification phases
+# pod definitions
#####################################
- phase:
- - 'basic'
- - 'deploy'
+ pod:
+ - virtual:
+ slave-label: 'huawei-virtual7'
+ os-version: 'xenial'
+ <<: *master
#####################################
# jobs
#####################################
jobs:
- - 'opera-daily-{stream}'
- - 'opera-daily-{phase}-{stream}'
+ - 'opera-{pod}-daily-{stream}'
+
#####################################
# job templates
#####################################
- job-template:
- name: 'opera-daily-{stream}'
+ name: 'opera-{pod}-daily-{stream}'
project-type: multijob
@@ -62,86 +64,35 @@
- project-parameter:
project: '{project}'
branch: '{branch}'
- - 'huawei-virtual7-defaults'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: os-nosdn-openo-ha
+ - '{slave-label}-defaults'
builders:
- description-setter:
description: "Built on $NODE_NAME"
- multijob:
- name: basic
+ name: deploy
condition: SUCCESSFUL
projects:
- - name: 'opera-daily-basic-{stream}'
- current-parameters: true
+ - name: 'compass-deploy-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters: |
+ DEPLOY_SCENARIO=os-nosdn-openo-ha
+ COMPASS_OS_VERSION=xenial
node-parameters: true
kill-phase-on: FAILURE
abort-all-job: true
- multijob:
- name: deploy
+ name: functest
condition: SUCCESSFUL
projects:
- - name: 'compass-deploy-virtual-daily-{stream}'
+ - name: 'functest-compass-{pod}-suite-{stream}'
current-parameters: false
predefined-parameters: |
DEPLOY_SCENARIO=os-nosdn-openo-ha
- COMPASS_OS_VERSION=xenial
+ FUNCTEST_SUITE_NAME=opera_vims
node-parameters: true
- kill-phase-on: FAILURE
+ kill-phase-on: NEVER
abort-all-job: true
-# - multijob:
-# name: functest
-# condition: SUCCESSFUL
-# projects:
-# - name: 'functest-compass-baremetal-suite-{stream}'
-# current-parameters: false
-# predefined-parameters:
-# FUNCTEST_SUITE_NAME=opera
-# node-parameters: true
-# kill-phase-on: NEVER
-# abort-all-job: true
-
-- job-template:
- name: 'opera-daily-{phase}-{stream}'
-
- disabled: '{obj:disabled}'
-
- concurrent: true
-
- properties:
- - logrotate-default
- - throttle:
- enabled: true
- max-per-node: 1
- option: 'project'
-
- scm:
- - git-scm
-
- wrappers:
- - ssh-agent-wrapper
- - timeout:
- timeout: 120
- fail: true
-
- builders:
- - description-setter:
- description: "Built on $NODE_NAME"
- - '{project}-daily-{phase}-macro'
-
-#####################################
-# builder macros
-#####################################
-- builder:
- name: 'opera-daily-basic-macro'
- builders:
- - shell: |
- #!/bin/bash
- echo "Hello world!"
-
-- builder:
- name: 'opera-daily-deploy-macro'
- builders:
- - shell: |
- #!/bin/bash
- echo "Hello world!"
-
diff --git a/jjb/releng/opnfv-docker-arm.yml b/jjb/releng/opnfv-docker-arm.yml
index ba540ed76..417fc702c 100644
--- a/jjb/releng/opnfv-docker-arm.yml
+++ b/jjb/releng/opnfv-docker-arm.yml
@@ -18,6 +18,11 @@
receivers: >
cristina.pauna@enea.com
alexandru.avadanii@enea.com
+ dovetail-arm-receivers: &dovetail-arm-receivers
+ receivers: >
+ cristina.pauna@enea.com
+ alexandru.avadanii@enea.com
+ alexandru.nemes@enea.com
other-receivers: &other-receivers
receivers: ''
@@ -26,6 +31,9 @@
- 'functest':
<<: *master
<<: *functest-arm-receivers
+ - 'dovetail':
+ <<: *master
+ <<: *dovetail-arm-receivers
# projects with jobs for stable
jobs:
diff --git a/jjb/releng/opnfv-docker.sh b/jjb/releng/opnfv-docker.sh
index 5d73a9d70..2aa52adc5 100644
--- a/jjb/releng/opnfv-docker.sh
+++ b/jjb/releng/opnfv-docker.sh
@@ -75,14 +75,11 @@ echo "Current branch: $BRANCH"
if [[ "$BRANCH" == "master" ]]; then
DOCKER_TAG="latest"
+elif [[ -n "${RELEASE_VERSION-}" ]]; then
+ DOCKER_TAG=${BRANCH##*/}.${RELEASE_VERSION}
+ # e.g. danube.1.0, danube.2.0, danube.3.0
else
- if [[ -n "${RELEASE_VERSION-}" ]]; then
- release=${BRANCH##*/}
- DOCKER_TAG=${release}.${RELEASE_VERSION}
- # e.g. colorado.1.0, colorado.2.0, colorado.3.0
- else
- DOCKER_TAG="stable"
- fi
+ DOCKER_TAG="stable"
fi
# Start the build
@@ -90,6 +87,9 @@ echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG"
echo "--------------------------------------------------------"
echo
if [[ $DOCKER_REPO_NAME == *"dovetail"* ]]; then
+ if [[ -n "${RELEASE_VERSION-}" ]]; then
+ DOCKER_TAG=${RELEASE_VERSION}
+ fi
cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG -f $DOCKERFILE ."
else
cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH
diff --git a/jjb/securedlab/check-jinja2.sh b/jjb/securedlab/check-jinja2.sh
new file mode 100755
index 000000000..57650ec28
--- /dev/null
+++ b/jjb/securedlab/check-jinja2.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+set +x
+set -o errexit
+for lab_configs in $(find labs/ -name 'pod.yaml'); do
+ while IFS= read -r jinja_templates; do
+ echo "./utils/generate_config.py -y $lab_configs -j $jinja_templates"
+ ./utils/generate_config.py -y $lab_configs -j $jinja_templates
+ done < <(find installers/ -name '*.j2')
+done
diff --git a/jjb/securedlab/check-jinja2.yml b/jjb/securedlab/check-jinja2.yml
new file mode 100644
index 000000000..1e85536e7
--- /dev/null
+++ b/jjb/securedlab/check-jinja2.yml
@@ -0,0 +1,80 @@
+########################
+# Job configuration to validate jinja2 files
+########################
+- project:
+
+ name: validate-templates
+
+ project: 'securedlab'
+
+ jobs:
+ - 'validate-jinja2-templates-{stream}'
+
+ stream:
+ - master:
+ branch: '{stream}'
+ disabled: false
+ - danube:
+ branch: 'stable/{stream}'
+ disabled: false
+
+########################
+# job templates
+########################
+
+- job-template:
+ name: 'validate-jinja2-templates-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: true
+
+ parameters:
+ - project-parameter:
+ project: $GERRIT_PROJECT
+ branch: '{branch}'
+ - node:
+ name: SLAVE_NAME
+ description: Slave to execute jinja template test
+ default-slaves:
+ - lf-build1
+ allowed-multiselect: true
+ ignore-offline-nodes: true
+
+ scm:
+ - git-scm-gerrit
+
+ triggers:
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - patchset-created-event:
+ exclude-drafts: 'false'
+ exclude-trivial-rebase: 'false'
+ exclude-no-code-change: 'false'
+ - draft-published-event
+ - comment-added-contains-event:
+ comment-contains-value: 'recheck'
+ - comment-added-contains-event:
+ comment-contains-value: 'reverify'
+ projects:
+ - project-compare-type: 'REG_EXP'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ file-paths:
+ - compare-type: ANT
+ pattern: 'utils/generate_config.yml'
+ - compare-type: ANT
+ pattern: '**/*.jinja2'
+ - compare-type: ANT
+ pattern: '**/*.yaml'
+ builders:
+ - check-jinja
+
+- builder:
+ name: check-jinja
+ builders:
+ - shell:
+ !include-raw-escape: ./check-jinja2.sh
diff --git a/jjb/xci/bifrost-provision.sh b/jjb/xci/bifrost-provision.sh
index 4724c2ee5..b37da9059 100755
--- a/jjb/xci/bifrost-provision.sh
+++ b/jjb/xci/bifrost-provision.sh
@@ -82,13 +82,13 @@ sudo -E ./scripts/destroy-env.sh
# provision VMs for the flavor
cd /opt/bifrost
-sudo -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
# list the provisioned VMs
cd /opt/bifrost
source env-vars
ironic node-list
-virsh list
+sudo -H -E virsh list
echo "OpenStack nodes are provisioned!"
# here we have to do something in order to capture what was the working sha1
diff --git a/jjb/xci/bifrost-verify.sh b/jjb/xci/bifrost-verify.sh
index f596d7527..18019a7cb 100755
--- a/jjb/xci/bifrost-verify.sh
+++ b/jjb/xci/bifrost-verify.sh
@@ -113,14 +113,14 @@ sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
# cleanup remnants of previous deployment
cd /opt/bifrost
-sudo -E ./scripts/destroy-env.sh
+sudo -H -E ./scripts/destroy-env.sh
# provision 3 VMs; xcimaster, controller, and compute
cd /opt/bifrost
-sudo -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
# list the provisioned VMs
cd /opt/bifrost
source env-vars
ironic node-list
-virsh list
+sudo -H -E virsh list
diff --git a/jjb/yardstick/yardstick-ci-jobs.yml b/jjb/yardstick/yardstick-ci-jobs.yml
index 1f2f3122c..5ff36f842 100644
--- a/jjb/yardstick/yardstick-ci-jobs.yml
+++ b/jjb/yardstick/yardstick-ci-jobs.yml
@@ -182,6 +182,16 @@
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *danube
+ - arm-virtual1:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - arm-virtual1:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *danube
- orange-pod2:
slave-label: '{pod}'
installer: joid
@@ -338,6 +348,13 @@
default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
+ name: 'yardstick-params-arm-virtual1'
+ parameters:
+ - string:
+ name: YARDSTICK_DB_BACKEND
+ default: '-i 104.197.68.199:8086'
+ description: 'Arguments to use in order to choose the backend DB'
+- parameter:
name: 'yardstick-params-joid-baremetal'
parameters:
- string: