-rwxr-xr-x  jjb/apex/apex-iso-verify.sh | 104
-rwxr-xr-x  jjb/apex/apex-upload-artifact.sh | 81
-rw-r--r--  jjb/apex/apex.yml | 76
-rw-r--r--  jjb/armband/armband-ci-jobs.yml | 89
-rw-r--r--  jjb/bottlenecks/bottlenecks-project-jobs.yml | 10
-rw-r--r--  jjb/compass4nfv/compass-ci-jobs.yml | 2
-rw-r--r--  jjb/compass4nfv/compass-dovetail-jobs.yml | 2
-rw-r--r--  jjb/compass4nfv/compass-project-jobs.yml | 2
-rw-r--r--  jjb/compass4nfv/compass-verify-jobs.yml | 2
-rw-r--r--  jjb/cperf/cperf-ci-jobs.yml | 2
-rwxr-xr-x  jjb/daisy4nfv/daisy4nfv-basic.sh | 1
-rw-r--r--  jjb/doctor/doctor.yml | 2
-rw-r--r--  jjb/dovetail/dovetail-ci-jobs.yml | 6
-rw-r--r--  jjb/dovetail/dovetail-weekly-jobs.yml | 1
-rw-r--r--  jjb/functest/functest-daily-jobs.yml | 8
-rw-r--r--  jjb/global/slave-params.yml | 18
-rw-r--r--  jjb/joid/joid-daily-jobs.yml | 2
-rw-r--r--  jjb/kvmfornfv/kvmfornfv.yml | 2
-rw-r--r--  jjb/releng/opnfv-docker-arm.yml | 8
-rw-r--r--  jjb/yardstick/yardstick-ci-jobs.yml | 17
-rw-r--r--  modules/opnfv/deployment/compass/adapter.py | 6
-rw-r--r--  prototypes/openstack-ansible/playbooks/configure-targethosts.yml | 2
-rw-r--r--  prototypes/xci/file/exports | 14
-rw-r--r--  prototypes/xci/file/ha/flavor-vars.yml | 26
-rw-r--r--  prototypes/xci/file/ha/openstack_user_config.yml | 45
-rw-r--r--  prototypes/xci/file/mini/configure-targethosts.yml | 32
-rw-r--r--  prototypes/xci/file/mini/flavor-vars.yml | 9
-rw-r--r--  prototypes/xci/file/mini/openstack_user_config.yml | 13
-rw-r--r--  prototypes/xci/file/modules | 8
-rw-r--r--  prototypes/xci/file/noha/configure-targethosts.yml | 36
-rw-r--r--  prototypes/xci/file/noha/flavor-vars.yml | 12
-rw-r--r--  prototypes/xci/file/noha/openstack_user_config.yml | 15
-rw-r--r--  prototypes/xci/playbooks/configure-localhost.yml | 6
-rw-r--r--  prototypes/xci/playbooks/configure-opnfvhost.yml | 18
-rw-r--r--  prototypes/xci/playbooks/configure-targethosts.yml (renamed from prototypes/xci/file/ha/configure-targethosts.yml) | 14
-rw-r--r--  prototypes/xci/playbooks/roles/configure-network/tasks/main.yml | 30
-rw-r--r--  prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml | 21
-rw-r--r--  prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml | 18
-rw-r--r--  prototypes/xci/template/compute.interface.j2 | 19
-rw-r--r--  prototypes/xci/template/controller.interface.j2 | 15
-rw-r--r--  prototypes/xci/template/opnfv.interface.j2 | 15
-rw-r--r--  utils/test/testapi/opnfv_testapi/common/check.py | 111
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/handlers.py | 96
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/pod_handlers.py | 20
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/project_handlers.py | 21
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/result_handlers.py | 43
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py | 29
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py | 42
48 files changed, 719 insertions(+), 452 deletions(-)
diff --git a/jjb/apex/apex-iso-verify.sh b/jjb/apex/apex-iso-verify.sh
new file mode 100755
index 000000000..cdeac04d7
--- /dev/null
+++ b/jjb/apex/apex-iso-verify.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# log info to console
+echo "Starting the Apex iso verify."
+echo "--------------------------------------------------------"
+echo
+
+BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
+
+source $BUILD_DIRECTORY/../opnfv.properties
+
+if ! rpm -q virt-install > /dev/null; then
+ sudo yum -y install virt-install
+fi
+
+# define a clean function
+rm_apex_iso_verify () {
+if sudo virsh list --all | grep apex-iso-verify | grep running; then
+ sudo virsh destroy apex-iso-verify
+fi
+if sudo virsh list --all | grep apex-iso-verify; then
+ sudo virsh undefine apex-iso-verify
+fi
+}
+
+# Make sure a pre-existing iso-verify isn't there
+rm_apex_iso_verify
+
+# run an install from the iso
+# This streams a serial console to tcp port 3737 on localhost
+sudo virt-install -n apex-iso-verify -r 4096 --vcpus 4 --os-variant=rhel7 \
+ --accelerate -v --noautoconsole --nographics \
+ --disk path=/var/lib/libvirt/images/apex-iso-verify.qcow2,size=30,format=qcow2 \
+ -l $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso \
+ --extra-args 'console=ttyS0 console=ttyS0,115200n8 serial inst.ks=file:/iso-verify.ks inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64:/' \
+ --initrd-inject $BUILD_DIRECTORY/../ci/iso-verify.ks \
+ --serial tcp,host=:3737,protocol=raw
+
+# Attach to tcp port 3737 and echo the output to stdout.
+# Watch for a 5 minute timeout, a power-off message, or a tcp disconnect.
+python << EOP
+#!/usr/bin/env python
+
+import sys
+import socket
+from time import sleep
+from time import time
+
+
+TCP_IP = '127.0.0.1'
+TCP_PORT = 3737
+BUFFER_SIZE = 1024
+
+try:
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect((TCP_IP, TCP_PORT))
+except Exception, e:
+    print "Failed to connect to the iso-verify vm's serial console"
+    print "this probably means that the VM failed to start"
+    raise e
+
+activity = time()
+data = s.recv(BUFFER_SIZE)
+last_data = data
+while time() - activity < 300:
+    try:
+        if data != last_data:
+            activity = time()
+            last_data = data
+        data = s.recv(BUFFER_SIZE)
+        sys.stdout.write(data)
+        if 'Powering off' in data:
+            break
+        sleep(.5)
+    except socket.error, e:
+        # for now, assume the connection was closed, which is good:
+        # it means the vm finished installing; print the error output
+        # just in case we need to debug
+        print "VM console connection lost: %s" % e
+        break
+s.close()
+
+if time() - activity > 300:
+    print "failing due to console inactivity"
+    exit(1)
+else:
+    print "Success!"
+EOP
+
+# save the python return code for after cleanup
+python_rc=$?
+
+# clean up
+rm_apex_iso_verify
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
+
+# Exit with the RC of the Python job
+exit $python_rc
diff --git a/jjb/apex/apex-upload-artifact.sh b/jjb/apex/apex-upload-artifact.sh
index c2de7d70d..d046c119d 100755
--- a/jjb/apex/apex-upload-artifact.sh
+++ b/jjb/apex/apex-upload-artifact.sh
@@ -3,8 +3,13 @@ set -o errexit
set -o nounset
set -o pipefail
+if [ -z "$ARTIFACT_TYPE" ]; then
+ echo "ERROR: ARTIFACT_TYPE not provided...exiting"
+ exit 1
+fi
+
# log info to console
-echo "Uploading the Apex artifact. This could take some time..."
+echo "Uploading the Apex ${ARTIFACT_TYPE} artifact. This could take some time..."
echo "--------------------------------------------------------"
echo
@@ -18,7 +23,7 @@ echo "Cloning releng repository..."
[ -d releng ] && rm -rf releng
git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null
#this is where we import the signing key
-if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
+if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
source $WORKSPACE/releng/utils/gpg_import_key.sh
fi
@@ -45,32 +50,18 @@ echo "ISO signature Upload Complete!"
}
uploadiso () {
-# upload artifact and additional files to google storage
-gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log
-echo "ISO Upload Complete!"
-RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
-RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
-VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud; do # removed onos for danube
- RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-done
-SRPM_INSTALL_PATH=$BUILD_DIRECTORY
-SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
-VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud; do # removed onos for danube
- SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-done
+ gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log
+ echo "ISO Upload Complete!"
}
uploadrpm () {
-#This is where we upload the rpms
-for artifact in $RPM_LIST $SRPM_LIST; do
- echo "Uploading artifact: ${artifact}"
- gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log
- echo "Upload complete for ${artifact}"
-done
-gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log
-gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
+ for artifact in $RPM_LIST $SRPM_LIST; do
+ echo "Uploading artifact: ${artifact}"
+ gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log
+ echo "Upload complete for ${artifact}"
+ done
+ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log
+ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
}
uploadsnap () {
@@ -84,21 +75,43 @@ uploadsnap () {
echo "Upload complete for Snapshot"
}
-if echo $WORKSPACE | grep promote > /dev/null; then
- uploadsnap
-elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
+if gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
echo "Signing Key avaliable"
- signiso
+ SIGN_ARTIFACT="true"
+fi
+
+if [ "$ARTIFACT_TYPE" == 'snapshot' ]; then
+ uploadsnap
+elif [ "$ARTIFACT_TYPE" == 'iso' ]; then
+ if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
+ signiso
+ fi
uploadiso
- signrpm
+elif [ "$ARTIFACT_TYPE" == 'rpm' ]; then
+ RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
+ RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
+ VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
+ for pkg in common undercloud; do # removed onos for danube
+ RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
+ done
+ SRPM_INSTALL_PATH=$BUILD_DIRECTORY
+ SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
+ VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
+ for pkg in common undercloud; do # removed onos for danube
+ SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
+ done
+
+ if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
+ signrpm
+ fi
uploadrpm
else
- uploadiso
- uploadrpm
+ echo "ERROR: Unknown artifact type ${ARTIFACT_TYPE} to upload...exiting"
+ exit 1
fi
echo
echo "--------------------------------------------------------"
echo "Done!"
-echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"
+if [ "$ARTIFACT_TYPE" == 'iso' ]; then echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"; fi
+if [ "$ARTIFACT_TYPE" == 'rpm' ]; then echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"; fi
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index e7982ba55..56eac78e6 100644
--- a/jjb/apex/apex.yml
+++ b/jjb/apex/apex.yml
@@ -12,6 +12,7 @@
- 'apex-daily-{stream}'
- 'apex-csit-promote-daily-{stream}'
- 'apex-fdio-promote-daily-{stream}'
+ - 'apex-verify-iso-{stream}'
# stream: branch with - in place of / (eg. stable-arno)
# branch: branch (eg. stable/arno)
@@ -443,8 +444,64 @@
git-revision: false
same-node: true
block: true
+ - inject:
+ properties-content: ARTIFACT_TYPE=rpm
+ - 'apex-upload-artifact'
+ - trigger-builds:
+ - project: 'apex-verify-iso-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ git-revision: false
+ block: true
+ same-node: true
+ - inject:
+ properties-content: ARTIFACT_TYPE=iso
- 'apex-upload-artifact'
+# ISO verify job
+- job-template:
+ name: 'apex-verify-iso-{stream}'
+
+ # Job template for builds
+ #
+ # Required Variables:
+ # stream: branch with - in place of / (eg. stable)
+ # branch: branch (eg. stable)
+ node: '{daily-slave}'
+
+ disabled: false
+
+ concurrent: true
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - apex-parameter:
+ gs-pathname: '{gs-pathname}'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: "Used for overriding the GIT URL coming from parameters macro."
+
+ scm:
+ - git-scm
+
+ properties:
+ - logrotate-default
+ - build-blocker:
+ use-build-blocker: true
+ block-level: 'NODE'
+ blocking-jobs:
+ - 'apex-deploy.*'
+ - throttle:
+ max-per-node: 1
+ max-total: 10
+ option: 'project'
+
+ builders:
+ - 'apex-iso-verify'
+
- job-template:
name: 'apex-deploy-virtual-{scenario}-{stream}'
@@ -616,7 +673,7 @@
# 4.not used for release criteria or compliance,
# only to debug the dovetail tool bugs with apex
#- trigger-builds:
- # - project: 'dovetail-apex-{slave}-debug-{stream}'
+ # - project: 'dovetail-apex-{slave}-proposed_tests-{stream}'
# current-parameters: false
# predefined-parameters:
# DEPLOY_SCENARIO=os-nosdn-nofeature-ha
@@ -1013,8 +1070,9 @@
same-node: true
- shell:
!include-raw-escape: ./apex-snapshot-create.sh
- - shell:
- !include-raw-escape: ./apex-upload-artifact.sh
+ - inject:
+ properties-content: ARTIFACT_TYPE=snapshot
+ - 'apex-upload-artifact'
# FDIO promote
- job-template:
@@ -1062,8 +1120,9 @@
same-node: true
- shell:
!include-raw-escape: ./apex-snapshot-create.sh
- - shell:
- !include-raw-escape: ./apex-upload-artifact.sh
+ - inject:
+ properties-content: ARTIFACT_TYPE=snapshot
+ - 'apex-upload-artifact'
- job-template:
name: 'apex-gs-clean-{stream}'
@@ -1147,6 +1206,13 @@
!include-raw: ./apex-workspace-cleanup.sh
- builder:
+ name: 'apex-iso-verify'
+ builders:
+ - shell:
+ !include-raw: ./apex-iso-verify.sh
+
+
+- builder:
name: 'apex-upload-artifact'
builders:
- shell:
diff --git a/jjb/armband/armband-ci-jobs.yml b/jjb/armband/armband-ci-jobs.yml
index 38a729de6..ff855c1e0 100644
--- a/jjb/armband/armband-ci-jobs.yml
+++ b/jjb/armband/armband-ci-jobs.yml
@@ -60,6 +60,10 @@
slave-label: arm-pod3-2
installer: fuel
<<: *danube
+ - arm-virtual1:
+ slave-label: arm-virtual1
+ installer: fuel
+ <<: *danube
#--------------------------------
# master
#--------------------------------
@@ -75,6 +79,10 @@
slave-label: arm-pod3-2
installer: fuel
<<: *master
+ - arm-virtual1:
+ slave-label: arm-virtual1
+ installer: fuel
+ <<: *master
#--------------------------------
# scenarios
#--------------------------------
@@ -181,7 +189,7 @@
# 4.not used for release criteria or compliance,
# only to debug the dovetail tool bugs with arm pods
- trigger-builds:
- - project: 'dovetail-{installer}-{pod}-debug-{stream}'
+ - project: 'dovetail-{installer}-{pod}-proposed_tests-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
@@ -333,31 +341,31 @@
- trigger:
name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 1'
+ - timed: ''
- trigger:
name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 2'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 3'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 4'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 5'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-sfc-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 6'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 7'
+ - timed: ''
#--------------------------------------------------------------------
# Enea Armband CI Virtual Triggers running against danube branch
#--------------------------------------------------------------------
@@ -389,6 +397,71 @@
name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-danube-trigger'
triggers:
- timed: ''
+
+#--------------------------------------------------------------------
+# Enea Armband Non CI Virtual Triggers running against danube branch
+#--------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+
+#--------------------------------------------------------------------
+# Enea Armband Non CI Virtual Triggers running against master branch
+#--------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+
#----------------------------------------------------------
# Enea Armband POD 2 Triggers running against master branch
#----------------------------------------------------------
diff --git a/jjb/bottlenecks/bottlenecks-project-jobs.yml b/jjb/bottlenecks/bottlenecks-project-jobs.yml
index a0abb9331..5dced2aad 100644
--- a/jjb/bottlenecks/bottlenecks-project-jobs.yml
+++ b/jjb/bottlenecks/bottlenecks-project-jobs.yml
@@ -70,8 +70,8 @@
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
builders:
- - bottlenecks-hello
- #- bottlenecks-unit-tests
+ #- bottlenecks-hello
+ - bottlenecks-unit-tests
- job-template:
name: 'bottlenecks-merge-{stream}'
@@ -206,10 +206,10 @@
# install python packages
easy_install -U setuptools
easy_install -U pip
- pip install -r requirements.txt
+ pip install -r $WORKSPACE/requirements/verify.txt
# unit tests
- /bin/bash $WORKSPACE/tests.sh
+ /bin/bash $WORKSPACE/verify.sh
deactivate
@@ -220,4 +220,4 @@
#!/bin/bash
set -o errexit
- echo "hello"
+ echo -e "Welcome to Bottlenecks! \nThe merge job is planned to support more functions!"
diff --git a/jjb/compass4nfv/compass-ci-jobs.yml b/jjb/compass4nfv/compass-ci-jobs.yml
index 237f8944d..61845acdf 100644
--- a/jjb/compass4nfv/compass-ci-jobs.yml
+++ b/jjb/compass4nfv/compass-ci-jobs.yml
@@ -160,7 +160,7 @@
#dovetail only master by now, not sync with A/B/C branches
#here the stream means the SUT stream, dovetail stream is defined in its own job
- trigger-builds:
- - project: 'dovetail-compass-{pod}-debug-{stream}'
+ - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
diff --git a/jjb/compass4nfv/compass-dovetail-jobs.yml b/jjb/compass4nfv/compass-dovetail-jobs.yml
index 30c80e648..c321655d7 100644
--- a/jjb/compass4nfv/compass-dovetail-jobs.yml
+++ b/jjb/compass4nfv/compass-dovetail-jobs.yml
@@ -98,7 +98,7 @@
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
- trigger-builds:
- - project: 'dovetail-compass-{pod}-debug-weekly-{stream}'
+ - project: 'dovetail-compass-{pod}-proposed_tests-weekly-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
diff --git a/jjb/compass4nfv/compass-project-jobs.yml b/jjb/compass4nfv/compass-project-jobs.yml
index f962518e0..59482459e 100644
--- a/jjb/compass4nfv/compass-project-jobs.yml
+++ b/jjb/compass4nfv/compass-project-jobs.yml
@@ -125,7 +125,7 @@
description: "URL to Google Storage."
- string:
name: PPA_REPO
- default: "http://205.177.226.237:9999{ppa-pathname}"
+ default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
- string:
name: PPA_CACHE
default: "$WORKSPACE/work/repo/"
diff --git a/jjb/compass4nfv/compass-verify-jobs.yml b/jjb/compass4nfv/compass-verify-jobs.yml
index 14279e649..56f54d838 100644
--- a/jjb/compass4nfv/compass-verify-jobs.yml
+++ b/jjb/compass4nfv/compass-verify-jobs.yml
@@ -339,7 +339,7 @@
description: "URL to Google Storage."
- string:
name: PPA_REPO
- default: "http://205.177.226.237:9999{ppa-pathname}"
+ default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
- string:
name: PPA_CACHE
default: "$WORKSPACE/work/repo/"
diff --git a/jjb/cperf/cperf-ci-jobs.yml b/jjb/cperf/cperf-ci-jobs.yml
index f6e068530..dc209d644 100644
--- a/jjb/cperf/cperf-ci-jobs.yml
+++ b/jjb/cperf/cperf-ci-jobs.yml
@@ -162,7 +162,7 @@
-v of_port:6653"
robot_suite="/home/opnfv/repos/odl_test/csit/suites/openflowplugin/Performance/010_Cbench.robot"
- docker run -ti -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
+ docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
- builder:
name: cperf-cleanup
diff --git a/jjb/daisy4nfv/daisy4nfv-basic.sh b/jjb/daisy4nfv/daisy4nfv-basic.sh
index 04b9b7bfa..87f5482e0 100755
--- a/jjb/daisy4nfv/daisy4nfv-basic.sh
+++ b/jjb/daisy4nfv/daisy4nfv-basic.sh
@@ -4,4 +4,3 @@ echo "--------------------------------------------------------"
echo "This is diasy4nfv basic job!"
echo "--------------------------------------------------------"
-sudo rm -rf /home/jenkins-ci/opnfv/slave_root/workspace/daisy4nfv-verify-build-master/*
diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index c677ef96e..807d436da 100644
--- a/jjb/doctor/doctor.yml
+++ b/jjb/doctor/doctor.yml
@@ -112,7 +112,7 @@
# functest-suite-parameter
- string:
name: FUNCTEST_SUITE_NAME
- default: '{project}'
+ default: 'doctor-notification'
- string:
name: TESTCASE_OPTIONS
default: '-e INSPECTOR_TYPE={inspector} -e PROFILER_TYPE={profiler} -v $WORKSPACE:/home/opnfv/repos/doctor'
diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml
index 869048088..4998278c8 100644
--- a/jjb/dovetail/dovetail-ci-jobs.yml
+++ b/jjb/dovetail/dovetail-ci-jobs.yml
@@ -137,10 +137,16 @@
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
+ - arm-virtual1:
+ slave-label: '{pod}'
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
#--------------------------------
testsuite:
- 'debug'
- 'compliance_set'
+ - 'proposed_tests'
jobs:
- 'dovetail-{SUT}-{pod}-{testsuite}-{stream}'
diff --git a/jjb/dovetail/dovetail-weekly-jobs.yml b/jjb/dovetail/dovetail-weekly-jobs.yml
index 915feb5e8..700657d68 100644
--- a/jjb/dovetail/dovetail-weekly-jobs.yml
+++ b/jjb/dovetail/dovetail-weekly-jobs.yml
@@ -46,6 +46,7 @@
testsuite:
- 'debug'
- 'compliance_set'
+ - 'proposed_tests'
loop:
- 'weekly':
diff --git a/jjb/functest/functest-daily-jobs.yml b/jjb/functest/functest-daily-jobs.yml
index e8d14321f..80ca44b32 100644
--- a/jjb/functest/functest-daily-jobs.yml
+++ b/jjb/functest/functest-daily-jobs.yml
@@ -162,6 +162,10 @@
slave-label: '{pod}'
installer: fuel
<<: *master
+ - arm-virtual1:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *master
- zte-pod1:
slave-label: '{pod}'
installer: fuel
@@ -194,6 +198,10 @@
slave-label: '{pod}'
installer: fuel
<<: *danube
+ - arm-virtual1:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *danube
# PODs for verify jobs triggered by each patch upload
- ool-virtual1:
slave-label: '{pod}'
diff --git a/jjb/global/slave-params.yml b/jjb/global/slave-params.yml
index 1905a098a..fc89f143b 100644
--- a/jjb/global/slave-params.yml
+++ b/jjb/global/slave-params.yml
@@ -765,6 +765,24 @@
default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
description: 'Base URI to the configuration directory'
- parameter:
+ name: 'arm-virtual1-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-virtual1
+ default-slaves:
+ - arm-virtual1
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+- parameter:
name: 'intel-virtual6-defaults'
parameters:
- node:
diff --git a/jjb/joid/joid-daily-jobs.yml b/jjb/joid/joid-daily-jobs.yml
index 7dc718950..13ea9b308 100644
--- a/jjb/joid/joid-daily-jobs.yml
+++ b/jjb/joid/joid-daily-jobs.yml
@@ -164,7 +164,7 @@
# 4.not used for release criteria or compliance,
# only to debug the dovetail tool bugs with joid
#- trigger-builds:
- # - project: 'dovetail-joid-{pod}-debug-{stream}'
+ # - project: 'dovetail-joid-{pod}-proposed_tests-{stream}'
# current-parameters: false
# predefined-parameters:
# DEPLOY_SCENARIO={scenario}
diff --git a/jjb/kvmfornfv/kvmfornfv.yml b/jjb/kvmfornfv/kvmfornfv.yml
index 8d607f985..9624778f8 100644
--- a/jjb/kvmfornfv/kvmfornfv.yml
+++ b/jjb/kvmfornfv/kvmfornfv.yml
@@ -11,7 +11,7 @@
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: false
+ disabled: true
#####################################
# patch verification phases
#####################################
diff --git a/jjb/releng/opnfv-docker-arm.yml b/jjb/releng/opnfv-docker-arm.yml
index ba540ed76..417fc702c 100644
--- a/jjb/releng/opnfv-docker-arm.yml
+++ b/jjb/releng/opnfv-docker-arm.yml
@@ -18,6 +18,11 @@
receivers: >
cristina.pauna@enea.com
alexandru.avadanii@enea.com
+ dovetail-arm-receivers: &dovetail-arm-receivers
+ receivers: >
+ cristina.pauna@enea.com
+ alexandru.avadanii@enea.com
+ alexandru.nemes@enea.com
other-receivers: &other-receivers
receivers: ''
@@ -26,6 +31,9 @@
- 'functest':
<<: *master
<<: *functest-arm-receivers
+ - 'dovetail':
+ <<: *master
+ <<: *dovetail-arm-receivers
# projects with jobs for stable
jobs:
diff --git a/jjb/yardstick/yardstick-ci-jobs.yml b/jjb/yardstick/yardstick-ci-jobs.yml
index 1f2f3122c..5ff36f842 100644
--- a/jjb/yardstick/yardstick-ci-jobs.yml
+++ b/jjb/yardstick/yardstick-ci-jobs.yml
@@ -182,6 +182,16 @@
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *danube
+ - arm-virtual1:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - arm-virtual1:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *danube
- orange-pod2:
slave-label: '{pod}'
installer: joid
@@ -338,6 +348,13 @@
default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
+ name: 'yardstick-params-arm-virtual1'
+ parameters:
+ - string:
+ name: YARDSTICK_DB_BACKEND
+ default: '-i 104.197.68.199:8086'
+ description: 'Arguments to use in order to choose the backend DB'
+- parameter:
name: 'yardstick-params-joid-baremetal'
parameters:
- string:
diff --git a/modules/opnfv/deployment/compass/adapter.py b/modules/opnfv/deployment/compass/adapter.py
index 856c7fc38..38aa45227 100644
--- a/modules/opnfv/deployment/compass/adapter.py
+++ b/modules/opnfv/deployment/compass/adapter.py
@@ -7,6 +7,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
+import json
import netaddr
import re
@@ -161,9 +162,10 @@ class CompassAdapter(manager.DeploymentHandler):
            fields = lines[i].strip().encode().rsplit('\t')
            host_id = fields[0].strip().encode()
            name = 'host{0}'.format(host_id)
-            node_roles = fields[1].strip().encode().lower()
+            node_roles_str = fields[1].strip().encode().lower()
+            node_roles_list = json.loads(node_roles_str)
            node_roles = [manager.Role.ODL if x == 'odl'
-                          else x for x in node_roles]
+                          else x for x in node_roles_list]
            roles = [x for x in [manager.Role.CONTROLLER,
                                 manager.Role.COMPUTE,
                                 manager.Role.ODL,
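
The json.loads fix above matters because the roles field arrives as a JSON-encoded list inside a string; iterating the raw string walks it one character at a time, so a comparison against a whole role name can never match. A small sketch with an illustrative value (the exact field format is not shown in the patch):

    import json

    node_roles_str = '["controller", "ha", "odl"]'  # illustrative value

    # Before the fix: x is '[', '"', 'c', ... one character at a time.
    broken = ['ODL' if x == 'odl' else x for x in node_roles_str]

    # After the fix: x is 'controller', 'ha', 'odl'.
    node_roles_list = json.loads(node_roles_str)
    fixed = ['ODL' if x == 'odl' else x for x in node_roles_list]
    print(fixed)  # ['controller', 'ha', 'ODL']
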
diff --git a/prototypes/openstack-ansible/playbooks/configure-targethosts.yml b/prototypes/openstack-ansible/playbooks/configure-targethosts.yml
index 1f4ad063e..538fe17ec 100644
--- a/prototypes/openstack-ansible/playbooks/configure-targethosts.yml
+++ b/prototypes/openstack-ansible/playbooks/configure-targethosts.yml
@@ -47,7 +47,7 @@
remote_user: root
tasks:
- name: make nfs dir
- file: "dest=/images mode=777 state=directory"
+ file: "dest=/images mode=0777 state=directory"
- name: configure service
shell: "echo 'nfs 2049/tcp' >> /etc/services && echo 'nfs 2049/udp' >> /etc/services"
- name: configure NFS
diff --git a/prototypes/xci/file/exports b/prototypes/xci/file/exports
deleted file mode 100644
index af64d618d..000000000
--- a/prototypes/xci/file/exports
+++ /dev/null
@@ -1,14 +0,0 @@
-# /etc/exports: the access control list for filesystems which may be exported
-# to NFS clients. See exports(5).
-#
-# Example for NFSv2 and NFSv3:
-# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
-#
-# Example for NFSv4:
-# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
-# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
-#
-# glance images are stored on compute host and made available to image hosts via nfs
-# see image_hosts section in openstack_user_config.yml for details
-/images *(rw,sync,no_subtree_check,no_root_squash)
-
diff --git a/prototypes/xci/file/ha/flavor-vars.yml b/prototypes/xci/file/ha/flavor-vars.yml
index 3cd1d6246..167502c95 100644
--- a/prototypes/xci/file/ha/flavor-vars.yml
+++ b/prototypes/xci/file/ha/flavor-vars.yml
@@ -1,37 +1,39 @@
---
host_info: {
'opnfv': {
- 'MGMT_IP': '172.29.236.10',
'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
'STORAGE_IP': '172.29.244.10'
},
'controller00': {
- 'MGMT_IP': '172.29.236.11',
'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
'STORAGE_IP': '172.29.244.11'
},
'controller01': {
- 'MGMT_IP': '172.29.236.12',
'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
'STORAGE_IP': '172.29.244.12'
},
'controller02': {
- 'MGMT_IP': '172.29.236.13',
'VLAN_IP': '192.168.122.5',
+ 'MGMT_IP': '172.29.236.13',
+ 'VXLAN_IP': '172.29.240.13',
'STORAGE_IP': '172.29.244.13'
},
'compute00': {
- 'MGMT_IP': '172.29.236.14',
'VLAN_IP': '192.168.122.6',
- 'STORAGE_IP': '172.29.244.14',
- 'VLAN_IP_SECOND': '173.29.241.1',
- 'VXLAN_IP': '172.29.240.14'
+ 'MGMT_IP': '172.29.236.14',
+ 'VXLAN_IP': '172.29.240.14',
+ 'STORAGE_IP': '172.29.244.14'
},
'compute01': {
- 'MGMT_IP': '172.29.236.15',
'VLAN_IP': '192.168.122.7',
- 'STORAGE_IP': '172.29.244.15',
- 'VLAN_IP_SECOND': '173.29.241.2',
- 'VXLAN_IP': '172.29.240.15'
+ 'MGMT_IP': '172.29.236.15',
+ 'VXLAN_IP': '172.29.240.15',
+ 'STORAGE_IP': '172.29.244.15'
}
}
diff --git a/prototypes/xci/file/ha/openstack_user_config.yml b/prototypes/xci/file/ha/openstack_user_config.yml
index 0c43702cb..09fb734c1 100644
--- a/prototypes/xci/file/ha/openstack_user_config.yml
+++ b/prototypes/xci/file/ha/openstack_user_config.yml
@@ -138,7 +138,7 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.15"
+ - server: "172.29.244.14"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -148,7 +148,7 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.15"
+ - server: "172.29.244.14"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -158,7 +158,7 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.15"
+ - server: "172.29.244.14"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -218,28 +218,37 @@ storage_hosts:
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.11"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
controller01:
ip: 172.29.236.12
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.12"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
controller02:
ip: 172.29.236.13
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.13"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
diff --git a/prototypes/xci/file/mini/configure-targethosts.yml b/prototypes/xci/file/mini/configure-targethosts.yml
deleted file mode 100644
index 395f44a64..000000000
--- a/prototypes/xci/file/mini/configure-targethosts.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- hosts: all
- remote_user: root
- tasks:
- - name: add public key to host
- copy:
- src: ../file/authorized_keys
- dest: /root/.ssh/authorized_keys
- - name: configure modules
- copy:
- src: ../file/modules
- dest: /etc/modules
-
-- hosts: controller
- remote_user: root
- vars_files:
- - ../var/{{ ansible_os_family }}.yml
- - ../var/flavor-vars.yml
- roles:
- # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute
- remote_user: root
- vars_files:
- - ../var/{{ ansible_os_family }}.yml
- - ../var/flavor-vars.yml
- roles:
- # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
- # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
- - role: configure-nfs
diff --git a/prototypes/xci/file/mini/flavor-vars.yml b/prototypes/xci/file/mini/flavor-vars.yml
index 01fba7129..0d446ba20 100644
--- a/prototypes/xci/file/mini/flavor-vars.yml
+++ b/prototypes/xci/file/mini/flavor-vars.yml
@@ -1,19 +1,20 @@
---
host_info: {
'opnfv': {
- 'MGMT_IP': '172.29.236.10',
'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
'STORAGE_IP': '172.29.244.10'
},
'controller00': {
- 'MGMT_IP': '172.29.236.11',
'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
'STORAGE_IP': '172.29.244.11'
},
'compute00': {
- 'MGMT_IP': '172.29.236.12',
'VLAN_IP': '192.168.122.4',
- 'VLAN_IP_SECOND': '173.29.241.1',
+ 'MGMT_IP': '172.29.236.12',
'VXLAN_IP': '172.29.240.12',
'STORAGE_IP': '172.29.244.12'
},
diff --git a/prototypes/xci/file/mini/openstack_user_config.yml b/prototypes/xci/file/mini/openstack_user_config.yml
index 70429cea9..f9ccee24f 100644
--- a/prototypes/xci/file/mini/openstack_user_config.yml
+++ b/prototypes/xci/file/mini/openstack_user_config.yml
@@ -160,8 +160,11 @@ storage_hosts:
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.11"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/prototypes/xci/file/modules b/prototypes/xci/file/modules
deleted file mode 100644
index 60a517f18..000000000
--- a/prototypes/xci/file/modules
+++ /dev/null
@@ -1,8 +0,0 @@
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-bonding
-8021q
diff --git a/prototypes/xci/file/noha/configure-targethosts.yml b/prototypes/xci/file/noha/configure-targethosts.yml
deleted file mode 100644
index 6dc147f3b..000000000
--- a/prototypes/xci/file/noha/configure-targethosts.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- hosts: all
- remote_user: root
- tasks:
- - name: add public key to host
- copy:
- src: ../file/authorized_keys
- dest: /root/.ssh/authorized_keys
- - name: configure modules
- copy:
- src: ../file/modules
- dest: /etc/modules
-
-- hosts: controller
- remote_user: root
- vars_files:
- - ../var/{{ ansible_os_family }}.yml
- - ../var/flavor-vars.yml
- roles:
- # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute
- remote_user: root
- vars_files:
- - ../var/{{ ansible_os_family }}.yml
- - ../var/flavor-vars.yml
- roles:
- # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute01
- remote_user: root
- # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
- roles:
- - role: configure-nfs
diff --git a/prototypes/xci/file/noha/flavor-vars.yml b/prototypes/xci/file/noha/flavor-vars.yml
index 7f52d343a..3c69a34bb 100644
--- a/prototypes/xci/file/noha/flavor-vars.yml
+++ b/prototypes/xci/file/noha/flavor-vars.yml
@@ -1,26 +1,26 @@
---
host_info: {
'opnfv': {
- 'MGMT_IP': '172.29.236.10',
'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
'STORAGE_IP': '172.29.244.10'
},
'controller00': {
- 'MGMT_IP': '172.29.236.11',
'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
'STORAGE_IP': '172.29.244.11'
},
'compute00': {
- 'MGMT_IP': '172.29.236.12',
'VLAN_IP': '192.168.122.4',
- 'VLAN_IP_SECOND': '173.29.241.1',
+ 'MGMT_IP': '172.29.236.12',
'VXLAN_IP': '172.29.240.12',
'STORAGE_IP': '172.29.244.12'
},
'compute01': {
- 'MGMT_IP': '172.29.236.13',
'VLAN_IP': '192.168.122.5',
- 'VLAN_IP_SECOND': '173.29.241.2',
+ 'MGMT_IP': '172.29.236.13',
'VXLAN_IP': '172.29.240.13',
'STORAGE_IP': '172.29.244.13'
}
diff --git a/prototypes/xci/file/noha/openstack_user_config.yml b/prototypes/xci/file/noha/openstack_user_config.yml
index 05de6a9c1..fb12655e7 100644
--- a/prototypes/xci/file/noha/openstack_user_config.yml
+++ b/prototypes/xci/file/noha/openstack_user_config.yml
@@ -118,7 +118,7 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.13"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -162,8 +162,11 @@ storage_hosts:
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.11"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/prototypes/xci/playbooks/configure-localhost.yml b/prototypes/xci/playbooks/configure-localhost.yml
index 2a559645e..34b974cd1 100644
--- a/prototypes/xci/playbooks/configure-localhost.yml
+++ b/prototypes/xci/playbooks/configure-localhost.yml
@@ -21,12 +21,6 @@
path: "{{LOG_PATH}}"
state: directory
recurse: no
- # when the deployment is not aio, we use playbook, configure-targethosts.yml, to configure all the hosts
- - name: copy multihost playbook
- copy:
- src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-targethosts.yml"
- dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
- when: XCI_FLAVOR != "aio"
# when the deployment is aio, we overwrite and use playbook, configure-opnfvhost.yml, since everything gets installed on opnfv host
- name: copy aio playbook
copy:
diff --git a/prototypes/xci/playbooks/configure-opnfvhost.yml b/prototypes/xci/playbooks/configure-opnfvhost.yml
index 8c794c422..64fcef0db 100644
--- a/prototypes/xci/playbooks/configure-opnfvhost.yml
+++ b/prototypes/xci/playbooks/configure-opnfvhost.yml
@@ -38,14 +38,6 @@
shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/user_variables.yml {{OPENSTACK_OSA_ETC_PATH}}"
- name: copy cinder.yml
shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/cinder.yml {{OPENSTACK_OSA_ETC_PATH}}/env.d"
- - name: bootstrap ansible on opnfv host
- command: "/bin/bash ./scripts/bootstrap-ansible.sh"
- args:
- chdir: "{{OPENSTACK_OSA_PATH}}"
- - name: generate password token
- command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
- args:
- chdir: "{{OPENSTACK_OSA_PATH}}/scripts"
# TODO: We need to get rid of this as soon as the issue is fixed upstream
- name: change the haproxy state from disable to enable
replace:
@@ -54,10 +46,16 @@
replace: '\1haproxy_state: enabled'
- name: copy OPNFV OpenStack playbook
shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
- # Copy pinned role requirements if we are running as part of daily CI loop
- name: copy OPNFV role requirements
shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}"
- when: XCI_LOOP == "daily"
+ - name: bootstrap ansible on opnfv host
+ command: "/bin/bash ./scripts/bootstrap-ansible.sh"
+ args:
+ chdir: "{{OPENSTACK_OSA_PATH}}"
+ - name: generate password token
+ command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
+ args:
+ chdir: "{{OPENSTACK_OSA_PATH}}/scripts"
- hosts: localhost
remote_user: root
tasks:
diff --git a/prototypes/xci/file/ha/configure-targethosts.yml b/prototypes/xci/playbooks/configure-targethosts.yml
index 6dc147f3b..50da1f223 100644
--- a/prototypes/xci/file/ha/configure-targethosts.yml
+++ b/prototypes/xci/playbooks/configure-targethosts.yml
@@ -6,10 +6,6 @@
copy:
src: ../file/authorized_keys
dest: /root/.ssh/authorized_keys
- - name: configure modules
- copy:
- src: ../file/modules
- dest: /etc/modules
- hosts: controller
remote_user: root
@@ -18,7 +14,9 @@
- ../var/flavor-vars.yml
roles:
# TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+ - { role: configure-network, src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+ # we need to force time sync with chrony or the nodes' clocks will drift apart
+ - role: synchronize-time
- hosts: compute
remote_user: root
@@ -27,9 +25,11 @@
- ../var/flavor-vars.yml
roles:
# TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+ - { role: configure-network, src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+ # we need to force time sync with chrony or the nodes' clocks will drift apart
+ - role: synchronize-time
-- hosts: compute01
+- hosts: compute00
remote_user: root
# TODO: this role is for configuring NFS on xenial and needs adjustment for other distros
roles:
diff --git a/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
index 8bc84822c..aafadf712 100644
--- a/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
+++ b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
@@ -8,9 +8,27 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# TODO: this role needs to be adjusted for different distros
-- name: configure network for {{ ansible_os_family }} on interface {{ interface }}
- template:
- src: "{{ src }}"
- dest: "{{ dest }}"
-- name: restart ubuntu xenial network service
- shell: "/sbin/ifconfig {{ interface }} 0 &&/sbin/ifdown -a && /sbin/ifup -a"
+- block:
+ - name: configure modules
+ lineinfile:
+ dest: /etc/modules
+ state: present
+ create: yes
+ line: "8021q"
+ - name: add modules
+ modprobe:
+ name: 8021q
+ state: present
+ - name: ensure glean rules are removed
+ file:
+ path: "/etc/udev/rules.d/99-glean.rules"
+ state: absent
+ - name: ensure interfaces.d folder is empty
+ shell: "/bin/rm -rf /etc/network/interfaces.d/*"
+ - name: ensure interfaces file is updated
+ template:
+ src: "{{ src }}"
+ dest: "{{ dest }}"
+ - name: restart network service
+ shell: "/sbin/ifconfig {{ interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
+ when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
index b188f4dbb..c52da0bf3 100644
--- a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
+++ b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
@@ -9,11 +9,14 @@
##############################################################################
# TODO: this is for xenial and needs to be adjusted for different distros
- block:
- - name: make NFS dir
+ - name: make NFS directories
file:
- dest: /images
- mode: 777
+ dest: "{{ item }}"
+ mode: 0777
state: directory
+ with_items:
+ - "/images"
+ - "/volumes"
- name: configure NFS service
lineinfile:
dest: /etc/services
@@ -23,11 +26,15 @@
with_items:
- "nfs 2049/tcp"
- "nfs 2049/udp"
- - name: configure NFS exports on ubuntu xenial
- copy:
- src: ../file/exports
+ - name: configure NFS exports
+ lineinfile:
dest: /etc/exports
- when: ansible_distribution_release == "xenial"
+ state: present
+ create: yes
+ line: "{{ item }}"
+ with_items:
+ - "/images *(rw,sync,no_subtree_check,no_root_squash)"
+ - "/volumes *(rw,sync,no_subtree_check,no_root_squash)"
# TODO: the service name might be different on other distros and needs to be adjusted
- name: restart ubuntu xenial NFS service
service:
diff --git a/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml b/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml
new file mode 100644
index 000000000..5c39d897b
--- /dev/null
+++ b/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this role needs to be adjusted for different distros
+- block:
+ - name: restart chrony
+ service:
+ name: chrony
+ state: restarted
+ - name: synchronize time
+ shell: "chronyc -a 'burst 4/4' && chronyc -a makestep"
+ when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/template/compute.interface.j2 b/prototypes/xci/template/compute.interface.j2
index 0c5147c45..094544c3b 100644
--- a/prototypes/xci/template/compute.interface.j2
+++ b/prototypes/xci/template/compute.interface.j2
@@ -1,11 +1,7 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
# The loopback network interface
auto lo
iface lo inet loopback
-
# Physical interface
auto {{ interface }}
iface {{ interface }} inet manual
@@ -20,7 +16,7 @@ auto {{ interface }}.30
iface {{ interface }}.30 inet manual
vlan-raw-device {{ interface }}
-# Storage network VLAN interface (optional)
+# Storage network VLAN interface
auto {{ interface }}.20
iface {{ interface }}.20 inet manual
vlan-raw-device {{ interface }}
@@ -55,6 +51,7 @@ iface br-vlan inet static
address {{host_info[inventory_hostname].VLAN_IP}}
netmask 255.255.255.0
gateway 192.168.122.1
+ dns-nameserver 8.8.8.8 8.8.4.4
offload-sg off
# Create veth pair, don't bomb if already exists
pre-up ip link add br-vlan-veth type veth peer name eth12 || true
@@ -65,17 +62,7 @@ iface br-vlan inet static
post-down ip link del br-vlan-veth || true
bridge_ports br-vlan-veth
-# Add an additional address to br-vlan
-iface br-vlan inet static
- # Flat network default gateway
- # -- This needs to exist somewhere for network reachability
- # -- from the router namespace for floating IP paths.
- # -- Putting this here is primarily for tempest to work.
- address {{host_info[inventory_hostname].VLAN_IP_SECOND}}
- netmask 255.255.252.0
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# compute1 Storage bridge
+# OpenStack Storage bridge
auto br-storage
iface br-storage inet static
bridge_stp off
diff --git a/prototypes/xci/template/controller.interface.j2 b/prototypes/xci/template/controller.interface.j2
index fbaa8b8dd..638e78e18 100644
--- a/prototypes/xci/template/controller.interface.j2
+++ b/prototypes/xci/template/controller.interface.j2
@@ -1,6 +1,3 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
# The loopback network interface
auto lo
iface lo inet loopback
@@ -35,18 +32,14 @@ iface br-mgmt inet static
netmask 255.255.252.0
# OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports {{ interface }}.30
+ address {{host_info[inventory_hostname].VXLAN_IP}}
+ netmask 255.255.252.0
# OpenStack Networking VLAN bridge
auto br-vlan
@@ -60,7 +53,7 @@ iface br-vlan inet static
gateway 192.168.122.1
dns-nameserver 8.8.8.8 8.8.4.4
-# compute1 Storage bridge
+# OpenStack Storage bridge
auto br-storage
iface br-storage inet static
bridge_stp off
diff --git a/prototypes/xci/template/opnfv.interface.j2 b/prototypes/xci/template/opnfv.interface.j2
index fbaa8b8dd..e9f8649c6 100644
--- a/prototypes/xci/template/opnfv.interface.j2
+++ b/prototypes/xci/template/opnfv.interface.j2
@@ -1,6 +1,3 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
# The loopback network interface
auto lo
iface lo inet loopback
@@ -35,18 +32,14 @@ iface br-mgmt inet static
netmask 255.255.252.0
# OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports {{ interface }}.30
+ address {{ host_info[inventory_hostname].VXLAN_IP }}
+ netmask 255.255.252.0
# OpenStack Networking VLAN bridge
auto br-vlan
@@ -60,7 +53,7 @@ iface br-vlan inet static
gateway 192.168.122.1
dns-nameserver 8.8.8.8 8.8.4.4
-# compute1 Storage bridge
+# OpenStack Storage bridge
auto br-storage
iface br-storage inet static
bridge_stp off
diff --git a/utils/test/testapi/opnfv_testapi/common/check.py b/utils/test/testapi/opnfv_testapi/common/check.py
new file mode 100644
index 000000000..be4b1df12
--- /dev/null
+++ b/utils/test/testapi/opnfv_testapi/common/check.py
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import functools
+
+from tornado import web, gen
+
+from opnfv_testapi.common import raises, message
+
+
+def authenticate(method):
+    @web.asynchronous
+    @gen.coroutine
+    @functools.wraps(method)
+    def wrapper(self, *args, **kwargs):
+        if self.auth:
+            try:
+                token = self.request.headers['X-Auth-Token']
+            except KeyError:
+                raises.Unauthorized(message.unauthorized())
+            query = {'access_token': token}
+            check = yield self._eval_db_find_one(query, 'tokens')
+            if not check:
+                raises.Forbidden(message.invalid_token())
+        ret = yield gen.coroutine(method)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrapper
+
+
+def not_exist(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        query = kwargs.get('query')
+        data = yield self._eval_db_find_one(query)
+        if not data:
+            raises.NotFound(message.not_found(self.table, query))
+        ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
+        raise gen.Return(ret)
+
+    return wrap
+
+
+def no_body(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        if self.json_args is None:
+            raises.BadRequest(message.no_body())
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+
+    return wrap
+
+
+def miss_fields(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        fields = kwargs.get('miss_fields')
+        if fields:
+            for miss in fields:
+                miss_data = self.json_args.get(miss)
+                if miss_data is None or miss_data == '':
+                    raises.BadRequest(message.missing(miss))
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
+
+
+def carriers_exist(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        carriers = kwargs.get('carriers')
+        if carriers:
+            for table, query in carriers:
+                exist = yield self._eval_db_find_one(query(), table)
+                if not exist:
+                    raises.Forbidden(message.not_found(table, query()))
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
+
+
+def new_not_exists(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        query = kwargs.get('query')
+        if query:
+            to_data = yield self._eval_db_find_one(query())
+            if to_data:
+                raises.Forbidden(message.exist(self.table, query()))
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
+
+
+def updated_one_not_exist(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, data, *args, **kwargs):
+        db_keys = kwargs.get('db_keys')
+        query = self._update_query(db_keys, data)
+        if query:
+            to_data = yield self._eval_db_find_one(query)
+            if to_data:
+                raises.Forbidden(message.exist(self.table, query))
+        ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
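
Each decorator above is a generator-based pre-check: it pulls its input from the keyword arguments the concrete handler passes in (query, miss_fields, carriers, db_keys), runs its own database lookups with yield, raises an HTTP error on failure, and otherwise delegates to the wrapped step through gen.coroutine. A self-contained sketch of the same shape, assuming the legacy tornado.gen decorator API this codebase uses, with a fake handler standing in for the mongo-backed GenericApiHandler:

    import functools

    from tornado import gen, ioloop


    def not_exist(xstep):
        # Same shape as the checks above: yield-based lookup, error
        # path, then delegation to the wrapped step via gen.coroutine.
        @functools.wraps(xstep)
        def wrap(self, *args, **kwargs):
            data = yield self.find_one(kwargs.get('query'))
            if not data:
                raise gen.Return('404 not found')
            ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
            raise gen.Return(ret)
        return wrap


    class FakeHandler(object):
        table = {'pod1': {'name': 'pod1'}}

        @gen.coroutine
        def find_one(self, query):
            raise gen.Return(self.table.get(query))

        @gen.coroutine          # outermost, just like _get_one below
        @not_exist
        def get_one(self, data, query=None):
            raise gen.Return('200 %s' % data['name'])


    run = ioloop.IOLoop.current().run_sync
    print(run(lambda: FakeHandler().get_one(query='pod1')))     # 200 pod1
    print(run(lambda: FakeHandler().get_one(query='missing')))  # 404 not found
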
diff --git a/utils/test/testapi/opnfv_testapi/resources/handlers.py b/utils/test/testapi/opnfv_testapi/resources/handlers.py
index 522bbe7f5..955fbbef7 100644
--- a/utils/test/testapi/opnfv_testapi/resources/handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/handlers.py
@@ -21,13 +21,13 @@
##############################################################################
from datetime import datetime
-import functools
import json
from tornado import gen
from tornado import web
import models
+from opnfv_testapi.common import check
from opnfv_testapi.common import message
from opnfv_testapi.common import raises
from opnfv_testapi.tornado_swagger import swagger
@@ -73,48 +73,20 @@ class GenericApiHandler(web.RequestHandler):
cls_data = self.table_cls.from_dict(data)
return cls_data.format_http()
- def authenticate(method):
- @web.asynchronous
- @gen.coroutine
- @functools.wraps(method)
- def wrapper(self, *args, **kwargs):
- if self.auth:
- try:
- token = self.request.headers['X-Auth-Token']
- except KeyError:
- raises.Unauthorized(message.unauthorized())
- query = {'access_token': token}
- check = yield self._eval_db_find_one(query, 'tokens')
- if not check:
- raises.Forbidden(message.invalid_token())
- ret = yield gen.coroutine(method)(self, *args, **kwargs)
- raise gen.Return(ret)
- return wrapper
-
- @authenticate
- def _create(self, miss_checks, db_checks, **kwargs):
+ @check.authenticate
+ @check.no_body
+ @check.miss_fields
+ @check.carriers_exist
+ @check.new_not_exists
+ def _create(self, **kwargs):
"""
:param miss_checks: [miss1, miss2]
:param db_checks: [(table, exist, query, error)]
"""
- if self.json_args is None:
- raises.BadRequest(message.no_body())
-
data = self.table_cls.from_dict(self.json_args)
- for miss in miss_checks:
- miss_data = data.__getattribute__(miss)
- if miss_data is None or miss_data == '':
- raises.BadRequest(message.missing(miss))
-
for k, v in kwargs.iteritems():
+ # skip kwargs consumed by the check decorators; only real
+ # document fields (e.g. project_name) belong on the model
+ if k in ('miss_fields', 'carriers', 'query'):
+ continue
data.__setattr__(k, v)
- for table, exist, query, error in db_checks:
- check = yield self._eval_db_find_one(query(data), table)
- if (exist and not check) or (not exist and check):
- code, msg = error(data)
- raises.CodeTBD(code, msg)
-
if self.table != 'results':
data.creation_date = datetime.now()
_id = yield self._eval_db(self.table, 'insert', data.format(),
@@ -146,47 +118,27 @@ class GenericApiHandler(web.RequestHandler):
@web.asynchronous
@gen.coroutine
- def _get_one(self, query):
- data = yield self._eval_db_find_one(query)
- if data is None:
- raises.NotFound(message.not_found(self.table, query))
+ @check.not_exist
+ def _get_one(self, data, query=None):
self.finish_request(self.format_data(data))
- @authenticate
- def _delete(self, query):
- data = yield self._eval_db_find_one(query)
- if data is None:
- raises.NotFound(message.not_found(self.table, query))
-
+ @check.authenticate
+ @check.not_exist
+ def _delete(self, data, query=None):
yield self._eval_db(self.table, 'remove', query)
self.finish_request()
- @authenticate
- def _update(self, query, db_keys):
- if self.json_args is None:
- raises.BadRequest(message.no_body())
-
- # check old data exist
- from_data = yield self._eval_db_find_one(query)
- if from_data is None:
- raises.NotFound(message.not_found(self.table, query))
-
- data = self.table_cls.from_dict(from_data)
- # check new data exist
- equal, new_query = self._update_query(db_keys, data)
- if not equal:
- to_data = yield self._eval_db_find_one(new_query)
- if to_data is not None:
- raises.Forbidden(message.exist(self.table, new_query))
-
- # we merge the whole document """
- edit_request = self._update_requests(data)
-
- """ Updating the DB """
- yield self._eval_db(self.table, 'update', query, edit_request,
+ @check.authenticate
+ @check.no_body
+ @check.not_exist
+ @check.updated_one_not_exist
+ def _update(self, data, query=None, **kwargs):
+ data = self.table_cls.from_dict(data)
+ update_req = self._update_requests(data)
+ yield self._eval_db(self.table, 'update', query, update_req,
check_keys=False)
- edit_request['_id'] = str(data._id)
- self.finish_request(edit_request)
+ update_req['_id'] = str(data._id)
+ self.finish_request(update_req)
def _update_requests(self, data):
request = dict()
@@ -219,13 +171,13 @@ class GenericApiHandler(web.RequestHandler):
equal = True
for key in keys:
new = self.json_args.get(key)
- old = data.__getattribute__(key)
+ old = data.get(key)
if new is None:
new = old
elif new != old:
equal = False
query[key] = new
- return equal, query
+ return query if not equal else dict()
def _eval_db(self, table, method, *args, **kwargs):
exec_collection = self.db.__getattr__(table)
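
The _update_query rewrite above also changes its contract: instead of an (equal, query) tuple it returns the candidate query only when some key actually changes, and an empty (falsy) dict otherwise, which is what lets the updated_one_not_exist decorator skip the duplicate lookup. A small self-contained sketch of that behavior, with made-up data:

# Same loop as handlers.py _update_query, extracted for illustration.
def update_query(keys, stored, incoming):
    query = dict()
    equal = True
    for key in keys:
        new = incoming.get(key)
        old = stored.get(key)
        if new is None:
            new = old
        elif new != old:
            equal = False
        query[key] = new
    # Falsy result means "nothing renamed, no clash check needed".
    return query if not equal else dict()


stored = {'name': 'pod-a'}
print(update_query(['name'], stored, {'name': 'pod-a'}))  # {}
print(update_query(['name'], stored, {'name': 'pod-b'}))  # {'name': 'pod-b'}
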
diff --git a/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py b/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py
index 2c303c934..e21841d33 100644
--- a/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py
@@ -6,10 +6,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import httplib
-
import handlers
-from opnfv_testapi.common import message
from opnfv_testapi.tornado_swagger import swagger
import pod_models
@@ -43,15 +40,10 @@ class PodCLHandler(GenericPodHandler):
@raise 403: pod already exists
@raise 400: body or name not provided
"""
- def query(data):
- return {'name': data.name}
-
- def error(data):
- return httplib.FORBIDDEN, message.exist('pod', data.name)
-
- miss_checks = ['name']
- db_checks = [(self.table, False, query, error)]
- self._create(miss_checks, db_checks)
+ def query():
+ return {'name': self.json_args.get('name')}
+ miss_fields = ['name']
+ self._create(miss_fields=miss_fields, query=query)
class PodGURHandler(GenericPodHandler):
@@ -63,9 +55,7 @@ class PodGURHandler(GenericPodHandler):
@return 200: pod exist
@raise 404: pod not exist
"""
- query = dict()
- query['name'] = pod_name
- self._get_one(query)
+ self._get_one(query={'name': pod_name})
def delete(self, pod_name):
""" Remove a POD
diff --git a/utils/test/testapi/opnfv_testapi/resources/project_handlers.py b/utils/test/testapi/opnfv_testapi/resources/project_handlers.py
index 59e0b88e5..d79cd3b61 100644
--- a/utils/test/testapi/opnfv_testapi/resources/project_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/project_handlers.py
@@ -6,10 +6,8 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import httplib
import handlers
-from opnfv_testapi.common import message
from opnfv_testapi.tornado_swagger import swagger
import project_models
@@ -45,15 +43,10 @@ class ProjectCLHandler(GenericProjectHandler):
@raise 403: project already exists
@raise 400: body or name not provided
"""
- def query(data):
- return {'name': data.name}
-
- def error(data):
- return httplib.FORBIDDEN, message.exist('project', data.name)
-
- miss_checks = ['name']
- db_checks = [(self.table, False, query, error)]
- self._create(miss_checks, db_checks)
+ def query():
+ return {'name': self.json_args.get('name')}
+ miss_fields = ['name']
+ self._create(miss_fields=miss_fields, query=query)
class ProjectGURHandler(GenericProjectHandler):
@@ -65,7 +58,7 @@ class ProjectGURHandler(GenericProjectHandler):
@return 200: project exist
@raise 404: project not exist
"""
- self._get_one({'name': project_name})
+ self._get_one(query={'name': project_name})
@swagger.operation(nickname="updateProjectByName")
def put(self, project_name):
@@ -81,7 +74,7 @@ class ProjectGURHandler(GenericProjectHandler):
"""
query = {'name': project_name}
db_keys = ['name']
- self._update(query, db_keys)
+ self._update(query=query, db_keys=db_keys)
@swagger.operation(nickname='deleteProjectByName')
def delete(self, project_name):
@@ -90,4 +83,4 @@ class ProjectGURHandler(GenericProjectHandler):
@return 200: delete success
@raise 404: project not exist
"""
- self._delete({'name': project_name})
+ self._delete(query={'name': project_name})
diff --git a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
index fb5ed9ec7..214706f5f 100644
--- a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
@@ -8,7 +8,6 @@
##############################################################################
from datetime import datetime
from datetime import timedelta
-import httplib
from bson import objectid
@@ -127,7 +126,9 @@ class ResultsCLHandler(GenericResultHandler):
if last is not None:
last = self.get_int('last', last)
- self._list(self.set_query(), sort=[('start_date', -1)], last=last)
+ self._list(query=self.set_query(),
+ sort=[('start_date', -1)],
+ last=last)
@swagger.operation(nickname="createTestResult")
def post(self):
@@ -141,31 +142,21 @@ class ResultsCLHandler(GenericResultHandler):
@raise 404: pod/project/testcase not exist
@raise 400: body/pod_name/project_name/case_name not provided
"""
- def pod_query(data):
- return {'name': data.pod_name}
+ def pod_query():
+ return {'name': self.json_args.get('pod_name')}
- def pod_error(data):
- return httplib.FORBIDDEN, message.not_found('pod', data.pod_name)
+ def project_query():
+ return {'name': self.json_args.get('project_name')}
- def project_query(data):
- return {'name': data.project_name}
+ def testcase_query():
+ return {'project_name': self.json_args.get('project_name'),
+ 'name': self.json_args.get('case_name')}
- def project_error(data):
- return httplib.FORBIDDEN, message.not_found('project',
- data.project_name)
-
- def testcase_query(data):
- return {'project_name': data.project_name, 'name': data.case_name}
-
- def testcase_error(data):
- return httplib.FORBIDDEN, message.not_found('testcase',
- data.case_name)
-
- miss_checks = ['pod_name', 'project_name', 'case_name']
- db_checks = [('pods', True, pod_query, pod_error),
- ('projects', True, project_query, project_error),
- ('testcases', True, testcase_query, testcase_error)]
- self._create(miss_checks, db_checks)
+ miss_fields = ['pod_name', 'project_name', 'case_name']
+ carriers = [('pods', pod_query),
+ ('projects', project_query),
+ ('testcases', testcase_query)]
+ self._create(miss_fields=miss_fields, carriers=carriers)
class ResultsGURHandler(GenericResultHandler):
@@ -179,7 +170,7 @@ class ResultsGURHandler(GenericResultHandler):
"""
query = dict()
query["_id"] = objectid.ObjectId(result_id)
- self._get_one(query)
+ self._get_one(query=query)
@swagger.operation(nickname="updateTestResultById")
def put(self, result_id):
@@ -195,4 +186,4 @@ class ResultsGURHandler(GenericResultHandler):
"""
query = {'_id': objectid.ObjectId(result_id)}
db_keys = []
- self._update(query, db_keys)
+ self._update(query=query, db_keys=db_keys)
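
The carriers list introduced here pairs a table name with a zero-argument query factory; carriers_exist in check.py requires each pair to match an existing document before the result is created. A rough in-memory sketch of that lookup loop, where db and find_one are stand-ins for MongoDB and _eval_db_find_one:

# Fake datastore standing in for the pods/projects collections.
db = {
    'pods': [{'name': 'pod-a'}],
    'projects': [{'name': 'functest'}],
}


def find_one(query, table):
    # Return the first document whose fields all match the query.
    matches = (doc for doc in db[table]
               if all(doc.get(k) == v for k, v in query.items()))
    return next(matches, None)


carriers = [('pods', lambda: {'name': 'pod-a'}),
            ('projects', lambda: {'name': 'functest'})]

for table, query in carriers:
    if not find_one(query(), table):
        # carriers_exist raises 403 Forbidden here instead.
        raise ValueError('%s not found: %s' % (table, query()))
print('all carriers exist')
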
diff --git a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
index bad79fdc6..5d420a56e 100644
--- a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
@@ -1,5 +1,4 @@
import functools
-import httplib
from opnfv_testapi.common import message
from opnfv_testapi.common import raises
@@ -65,7 +64,7 @@ class ScenariosCLHandler(GenericScenarioHandler):
query['installers'] = {'$elemMatch': elem_query}
return query
- self._list(_set_query())
+ self._list(query=_set_query())
@swagger.operation(nickname="createScenario")
def post(self):
@@ -79,15 +78,10 @@ class ScenariosCLHandler(GenericScenarioHandler):
@raise 403: scenario already exists
@raise 400: body or name not provided
"""
- def query(data):
- return {'name': data.name}
-
- def error(data):
- return httplib.FORBIDDEN, message.exist('scenario', data.name)
-
- miss_checks = ['name']
- db_checks = [(self.table, False, query, error)]
- self._create(miss_checks=miss_checks, db_checks=db_checks)
+ def query():
+ return {'name': self.json_args.get('name')}
+ miss_fields = ['name']
+ self._create(miss_fields=miss_fields, query=query)
class ScenarioGURHandler(GenericScenarioHandler):
@@ -99,7 +93,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
@return 200: scenario exist
@raise 404: scenario not exist
"""
- self._get_one({'name': name})
+ self._get_one(query={'name': name})
pass
@swagger.operation(nickname="updateScenarioByName")
@@ -116,7 +110,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
"""
query = {'name': name}
db_keys = ['name']
- self._update(query, db_keys)
+ self._update(query=query, db_keys=db_keys)
@swagger.operation(nickname="deleteScenarioByName")
def delete(self, name):
@@ -126,19 +120,16 @@ class ScenarioGURHandler(GenericScenarioHandler):
@raise 404: scenario not exist
"""
- query = {'name': name}
- self._delete(query)
+ self._delete(query={'name': name})
def _update_query(self, keys, data):
query = dict()
- equal = True
if self._is_rename():
new = self._term.get('name')
- if data.name != new:
- equal = False
+ if data.get('name') != new:
query['name'] = new
- return equal, query
+ return query
def _update_requests(self, data):
updates = {
diff --git a/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py b/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py
index bc22b74e2..9399326f0 100644
--- a/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py
@@ -6,9 +6,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import httplib
-from opnfv_testapi.common import message
from opnfv_testapi.resources import handlers
from opnfv_testapi.resources import testcase_models
from opnfv_testapi.tornado_swagger import swagger
@@ -32,9 +30,7 @@ class TestcaseCLHandler(GenericTestcaseHandler):
empty list if no testcase exists in this project
@rtype: L{TestCases}
"""
- query = dict()
- query['project_name'] = project_name
- self._list(query)
+ self._list(query={'project_name': project_name})
@swagger.operation(nickname="createTestCase")
def post(self, project_name):
@@ -49,26 +45,18 @@ class TestcaseCLHandler(GenericTestcaseHandler):
or testcase already exists in this project
@raise 400: body or name not provided
"""
- def p_query(data):
- return {'name': data.project_name}
-
- def tc_query(data):
- return {
- 'project_name': data.project_name,
- 'name': data.name
- }
-
- def p_error(data):
- return httplib.FORBIDDEN, message.not_found('project',
- data.project_name)
-
- def tc_error(data):
- return httplib.FORBIDDEN, message.exist('testcase', data.name)
+ def project_query():
+ return {'name': project_name}
- miss_checks = ['name']
- db_checks = [(self.db_projects, True, p_query, p_error),
- (self.db_testcases, False, tc_query, tc_error)]
- self._create(miss_checks, db_checks, project_name=project_name)
+ def testcase_query():
+ return {'project_name': project_name,
+ 'name': self.json_args.get('name')}
+ miss_fields = ['name']
+ carriers = [(self.db_projects, project_query)]
+ self._create(miss_fields=miss_fields,
+ carriers=carriers,
+ query=testcase_query,
+ project_name=project_name)
class TestcaseGURHandler(GenericTestcaseHandler):
@@ -84,7 +72,7 @@ class TestcaseGURHandler(GenericTestcaseHandler):
query = dict()
query['project_name'] = project_name
query["name"] = case_name
- self._get_one(query)
+ self._get_one(query=query)
@swagger.operation(nickname="updateTestCaseByName")
def put(self, project_name, case_name):
@@ -102,7 +90,7 @@ class TestcaseGURHandler(GenericTestcaseHandler):
"""
query = {'project_name': project_name, 'name': case_name}
db_keys = ['name', 'project_name']
- self._update(query, db_keys)
+ self._update(query=query, db_keys=db_keys)
@swagger.operation(nickname='deleteTestCaseByName')
def delete(self, project_name, case_name):
@@ -112,4 +100,4 @@ class TestcaseGURHandler(GenericTestcaseHandler):
@raise 404: testcase not exist
"""
query = {'project_name': project_name, 'name': case_name}
- self._delete(query)
+ self._delete(query=query)
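
Throughout this refactor the query helpers change from query(data), which received a parsed model object, to zero-argument closures that capture request state (URL parameters, json_args) from the enclosing method. A small sketch of that closure pattern, with illustrative values:

def make_testcase_query(project_name, json_args):
    def testcase_query():
        # Captures project_name and json_args from the enclosing
        # scope, so the check decorators can call it with no args.
        return {'project_name': project_name,
                'name': json_args.get('name')}
    return testcase_query


query = make_testcase_query('functest', {'name': 'tempest'})
print(query())  # {'project_name': 'functest', 'name': 'tempest'}
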