-rw-r--r--  .gitignore | 1
-rw-r--r--  docs/jenkins-job-builder/opnfv-jjb-usage.rst | 8
-rwxr-xr-x  jjb/apex/apex-build.sh | 8
-rwxr-xr-x  jjb/apex/apex-deploy.sh | 24
-rwxr-xr-x  jjb/apex/apex-iso-verify.sh | 104
-rw-r--r--  jjb/apex/apex-snapshot-deploy.sh | 2
-rwxr-xr-x  jjb/apex/apex-unit-test.sh | 2
-rwxr-xr-x  jjb/apex/apex-upload-artifact.sh | 81
-rw-r--r--  jjb/apex/apex.yml | 82
-rw-r--r--  jjb/armband/armband-ci-jobs.yml | 125
-rwxr-xr-x  jjb/armband/armband-deploy.sh | 4
-rw-r--r--  jjb/bottlenecks/bottlenecks-cleanup.sh | 12
-rw-r--r--  jjb/bottlenecks/bottlenecks-project-jobs.yml | 10
-rw-r--r--  jjb/bottlenecks/docker_cleanup.sh | 106
-rw-r--r--  jjb/compass4nfv/compass-ci-jobs.yml | 2
-rw-r--r--  jjb/compass4nfv/compass-dovetail-jobs.yml | 25
-rw-r--r--  jjb/compass4nfv/compass-project-jobs.yml | 2
-rw-r--r--  jjb/compass4nfv/compass-verify-jobs.yml | 2
-rw-r--r--  jjb/cperf/cperf-ci-jobs.yml | 20
-rw-r--r--  jjb/daisy4nfv/daisy-daily-jobs.yml | 1
-rw-r--r--  jjb/daisy4nfv/daisy-project-jobs.yml | 11
-rwxr-xr-x  jjb/daisy4nfv/daisy4nfv-basic.sh | 1
-rwxr-xr-x  jjb/daisy4nfv/daisy4nfv-build.sh | 4
-rw-r--r--  jjb/daisy4nfv/daisy4nfv-merge-jobs.yml | 5
-rw-r--r--  jjb/daisy4nfv/daisy4nfv-verify-jobs.yml | 46
-rw-r--r--  jjb/doctor/doctor.yml | 2
-rw-r--r--  jjb/dovetail/dovetail-ci-jobs.yml | 11
-rwxr-xr-x  jjb/dovetail/dovetail-run.sh | 50
-rw-r--r--  jjb/dovetail/dovetail-weekly-jobs.yml | 36
-rw-r--r--  jjb/functest/functest-daily-jobs.yml | 12
-rw-r--r--  jjb/global/slave-params.yml | 24
-rw-r--r--  jjb/joid/joid-daily-jobs.yml | 2
-rw-r--r--  jjb/kvmfornfv/kvmfornfv.yml | 2
-rw-r--r--  jjb/opera/opera-daily-jobs.yml | 105
-rw-r--r--  jjb/releng/opnfv-docker-arm.yml | 8
-rw-r--r--  jjb/releng/opnfv-docker.sh | 32
-rw-r--r--  jjb/releng/testapi-automate.yml | 3
-rwxr-xr-x  jjb/securedlab/check-jinja2.sh | 9
-rw-r--r--  jjb/securedlab/check-jinja2.yml | 80
-rw-r--r--  jjb/test-requirements.txt | 1
-rw-r--r--  jjb/xci/bifrost-cleanup-job.yml | 2
-rw-r--r--  jjb/xci/bifrost-periodic-jobs.yml | 152
-rwxr-xr-x  jjb/xci/bifrost-provision.sh (renamed from jjb/xci/xci-provision.sh) | 21
-rw-r--r--  jjb/xci/bifrost-verify-jobs.yml | 5
-rw-r--r--  jjb/xci/osa-periodic-jobs.yml | 149
-rw-r--r--  jjb/xci/xci-daily-jobs.yml | 178
-rwxr-xr-x  jjb/xci/xci-deploy.sh | 122
-rw-r--r--  jjb/yardstick/yardstick-ci-jobs.yml | 17
-rwxr-xr-x  jjb/yardstick/yardstick-daily.sh | 2
-rw-r--r--  modules/opnfv/deployment/compass/__init__.py (renamed from prototypes/xci/file/aio/openstack_user_config.yml) | 0
-rw-r--r--  modules/opnfv/deployment/compass/adapter.py | 187
-rw-r--r--  modules/opnfv/deployment/example.py | 14
-rw-r--r--  modules/opnfv/deployment/factory.py | 6
-rw-r--r--  modules/opnfv/deployment/manager.py | 9
-rw-r--r--  modules/requirements.txt | 3
-rw-r--r--  modules/test-requirements.txt | 6
-rw-r--r--  prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic-inspector.conf.j2 | 66
-rw-r--r--  prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic.conf.j2 | 92
-rwxr-xr-x  prototypes/bifrost/scripts/bifrost-provision.sh | 2
-rw-r--r--  prototypes/openstack-ansible/playbooks/configure-targethosts.yml | 2
-rw-r--r--  prototypes/xci/README.rst | 217
-rwxr-xr-x  prototypes/xci/config/env-vars | 5
-rwxr-xr-x  prototypes/xci/config/pinned-versions | 8
-rw-r--r--  prototypes/xci/docs/developer-guide.rst | 31
-rw-r--r--  prototypes/xci/file/aio/configure-opnfvhost.yml | 22
-rw-r--r--  prototypes/xci/file/aio/flavor-vars.yml | 3
-rw-r--r--  prototypes/xci/file/aio/inventory | 2
-rw-r--r--  prototypes/xci/file/ansible-role-requirements.yml | 104
-rw-r--r--  prototypes/xci/file/exports | 14
-rw-r--r--  prototypes/xci/file/ha/flavor-vars.yml | 39
-rw-r--r--  prototypes/xci/file/ha/inventory | 11
-rw-r--r--  prototypes/xci/file/ha/openstack_user_config.yml | 254
-rw-r--r--  prototypes/xci/file/ha/user_variables.yml | 28
-rw-r--r--  prototypes/xci/file/mini/flavor-vars.yml | 21
-rw-r--r--  prototypes/xci/file/mini/inventory | 8
-rw-r--r--  prototypes/xci/file/mini/openstack_user_config.yml | 170
-rw-r--r--  prototypes/xci/file/mini/user_variables.yml | 28
-rw-r--r--  prototypes/xci/file/modules | 8
-rw-r--r--  prototypes/xci/file/noha/flavor-vars.yml | 27
-rw-r--r--  prototypes/xci/file/noha/inventory | 9
-rw-r--r--  prototypes/xci/file/noha/openstack_user_config.yml | 172
-rw-r--r--  prototypes/xci/file/noha/user_variables.yml | 28
-rw-r--r--  prototypes/xci/file/setup-openstack.yml | 5
-rw-r--r--  prototypes/xci/file/user_variables.yml | 0
-rw-r--r--  prototypes/xci/playbooks/configure-localhost.yml | 11
-rw-r--r--  prototypes/xci/playbooks/configure-opnfvhost.yml | 29
-rw-r--r--  prototypes/xci/playbooks/configure-targethosts.yml | 36
-rw-r--r--  prototypes/xci/playbooks/provision-vm-nodes.yml | 32
-rw-r--r--  prototypes/xci/playbooks/roles/configure-network/tasks/main.yml | 34
-rw-r--r--  prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml | 43
-rw-r--r--  prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml | 1
-rw-r--r--  prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml | 18
-rw-r--r--  prototypes/xci/template/compute.interface.j2 | 49
-rw-r--r--  prototypes/xci/template/controller.interface.j2 | 45
-rw-r--r--  prototypes/xci/template/opnfv.interface.j2 | 45
-rw-r--r--  prototypes/xci/var/Debian.yml | 11
-rw-r--r--  prototypes/xci/var/RedHat.yml | 10
-rw-r--r--  prototypes/xci/var/Suse.yml | 10
-rw-r--r--  prototypes/xci/var/opnfv.yml | 25
-rwxr-xr-x  prototypes/xci/xci-deploy.sh | 196
-rw-r--r--  setup.py | 9
-rw-r--r--  tox.ini | 34
-rw-r--r--  utils/create_pod_file.py | 102
-rw-r--r--  utils/test/reporting/img/danube.jpg | bin 51220 -> 53437 bytes
-rw-r--r--  utils/test/testapi/opnfv_testapi/common/check.py | 111
-rw-r--r--  utils/test/testapi/opnfv_testapi/common/message.py | 46
-rw-r--r--  utils/test/testapi/opnfv_testapi/common/raises.py | 39
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/handlers.py | 116
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/pod_handlers.py | 20
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/project_handlers.py | 21
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/result_handlers.py | 51
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py | 37
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py | 43
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/executor.py | 83
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/test_base.py | 2
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py | 2
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py | 51
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/test_project.py | 13
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/test_result.py | 23
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py | 9
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py | 15
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/test_token.py | 79
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/test_version.py | 11
123 files changed, 3546 insertions, 1280 deletions
diff --git a/.gitignore b/.gitignore
index 431e52139..eeabaeb63 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,3 +35,4 @@ testapi_venv/
.cache
.tox
*.retry
+job_output/
diff --git a/docs/jenkins-job-builder/opnfv-jjb-usage.rst b/docs/jenkins-job-builder/opnfv-jjb-usage.rst
index 52dbdebe5..f34833f5c 100644
--- a/docs/jenkins-job-builder/opnfv-jjb-usage.rst
+++ b/docs/jenkins-job-builder/opnfv-jjb-usage.rst
@@ -21,6 +21,14 @@ Make changes::
To ssh://agardner@gerrit.opnfv.org:29418/releng.git
* [new branch] HEAD -> refs/publish/master
+Test with tox::
+
+ tox -v -ejjb
+
+Submit the change to gerrit::
+
+ git review -v
+
Follow the link to gerrit https://gerrit.opnfv.org/gerrit/51 in a few moments
the verify job will have completed and you will see Verified +1 jenkins-ci in
the gerrit ui.
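
The documentation change above describes the local verify-then-submit loop for JJB changes. A minimal sketch of that loop, assuming a clone of the releng repository and a configured git-review remote, is:

    # Clone releng, edit the job definitions, and verify the templates locally
    git clone https://gerrit.opnfv.org/gerrit/releng
    cd releng
    # <edit files under jjb/>
    tox -v -ejjb    # test the job definitions with the jjb tox environment documented above
    git review -v   # push the change to gerrit; the verify job then reports Verified +1
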
diff --git a/jjb/apex/apex-build.sh b/jjb/apex/apex-build.sh
index 220d02435..b6b2f212a 100755
--- a/jjb/apex/apex-build.sh
+++ b/jjb/apex/apex-build.sh
@@ -28,10 +28,10 @@ cd $WORKSPACE/ci
./build.sh $BUILD_ARGS
RPM_VERSION=$(grep Version: $WORKSPACE/build/rpm_specs/opnfv-apex.spec | awk '{ print $2 }')-$(echo $OPNFV_ARTIFACT_VERSION | tr -d '_-')
# list the contents of BUILD_OUTPUT directory
-echo "Build Directory is ${BUILD_DIRECTORY}"
+echo "Build Directory is ${BUILD_DIRECTORY}/../.build"
echo "Build Directory Contents:"
echo "-------------------------"
-ls -al $BUILD_DIRECTORY
+ls -al ${BUILD_DIRECTORY}/../.build
# list the contents of CACHE directory
echo "Cache Directory is ${CACHE_DIRECTORY}"
@@ -47,10 +47,10 @@ if ! echo $BUILD_TAG | grep "apex-verify" 1> /dev/null; then
echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
- echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso | cut -d' ' -f1)"
+ echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/../.build/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso | cut -d' ' -f1)"
echo "OPNFV_SRPM_URL=$GS_URL/opnfv-apex-$RPM_VERSION.src.rpm"
echo "OPNFV_RPM_URL=$GS_URL/opnfv-apex-$RPM_VERSION.noarch.rpm"
- echo "OPNFV_RPM_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/noarch/opnfv-apex-$RPM_VERSION.noarch.rpm | cut -d' ' -f1)"
+ echo "OPNFV_RPM_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/../.build/noarch/opnfv-apex-$RPM_VERSION.noarch.rpm | cut -d' ' -f1)"
echo "OPNFV_BUILD_URL=$BUILD_URL"
) > $WORKSPACE/opnfv.properties
fi
diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh
index 6343b838b..06f7622f5 100755
--- a/jjb/apex/apex-deploy.sh
+++ b/jjb/apex/apex-deploy.sh
@@ -59,6 +59,12 @@ else
fi
fi
+# rename odl_l3 to odl only for master
+# this can be removed once all the odl_l3 references
+# are updated to odl after the danube jobs are removed
+if [[ "$BUILD_DIRECTORY" == *master* ]]; then
+ DEPLOY_SCENARIO=${DEPLOY_SCENARIO/odl_l3/odl}
+fi
if [ -z "$DEPLOY_SCENARIO" ]; then
echo "Deploy scenario not set!"
exit 1
@@ -87,8 +93,8 @@ if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
DEPLOY_CMD="$(pwd)/deploy.sh"
- RESOURCES="${WORKSPACE}/.build/"
- CONFIG="${WORKSPACE}/build"
+ IMAGES="${WORKSPACE}/.build/"
+ BASE="${WORKSPACE}/build"
LIB="${WORKSPACE}/lib"
# Make sure python34 deps are installed
for dep_pkg in epel-release python34 python34-PyYAML python34-setuptools; do
@@ -123,7 +129,7 @@ if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
# use RPMs
else
# find version of RPM
- VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-[0-9]{8}')
+ VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-([0-9]{8}|[a-z]+-[0-9]\.[0-9]+)')
# build RPM List which already includes base Apex RPM
for pkg in ${APEX_PKGS}; do
RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}.noarch.rpm"
@@ -145,13 +151,13 @@ else
DEPLOY_CMD=opnfv-deploy
DEPLOY_SETTINGS_DIR="/etc/opnfv-apex/"
NETWORK_SETTINGS_DIR="/etc/opnfv-apex/"
- RESOURCES="/var/opt/opnfv/images"
- CONFIG="/var/opt/opnfv"
+ IMAGES="/var/opt/opnfv/images"
+ BASE="/var/opt/opnfv"
LIB="/var/opt/opnfv/lib"
fi
# set env vars to deploy cmd
-DEPLOY_CMD="CONFIG=${CONFIG} RESOURCES=${RESOURCES} LIB=${LIB} ${DEPLOY_CMD}"
+DEPLOY_CMD="BASE=${BASE} IMAGES=${IMAGES} LIB=${LIB} ${DEPLOY_CMD}"
if [ "$OPNFV_CLEAN" == 'yes' ]; then
if sudo test -e '/root/inventory/pod_settings.yaml'; then
@@ -160,9 +166,9 @@ if [ "$OPNFV_CLEAN" == 'yes' ]; then
clean_opts=''
fi
if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
- sudo CONFIG=${CONFIG} LIB=${LIB} ./clean.sh ${clean_opts}
+ sudo BASE=${BASE} LIB=${LIB} ./clean.sh ${clean_opts}
else
- sudo CONFIG=${CONFIG} LIB=${LIB} opnfv-clean ${clean_opts}
+ sudo BASE=${BASE} LIB=${LIB} opnfv-clean ${clean_opts}
fi
fi
@@ -183,7 +189,7 @@ if [[ "$JOB_NAME" == *virtual* ]]; then
# settings for virtual deployment
DEPLOY_CMD="${DEPLOY_CMD} -v"
if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
- DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 14 --virtual-compute-ram 8"
+ DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 12 --virtual-compute-ram 7"
fi
if [[ "$JOB_NAME" == *csit* ]]; then
DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
diff --git a/jjb/apex/apex-iso-verify.sh b/jjb/apex/apex-iso-verify.sh
new file mode 100755
index 000000000..cdeac04d7
--- /dev/null
+++ b/jjb/apex/apex-iso-verify.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# log info to console
+echo "Starting the Apex iso verify."
+echo "--------------------------------------------------------"
+echo
+
+BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
+
+source $BUILD_DIRECTORY/../opnfv.properties
+
+if ! rpm -q virt-install > /dev/null; then
+ sudo yum -y install virt-install
+fi
+
+# define a clean function
+rm_apex_iso_verify () {
+if sudo virsh list --all | grep apex-iso-verify | grep running; then
+ sudo virsh destroy apex-iso-verify
+fi
+if sudo virsh list --all | grep apex-iso-verify; then
+ sudo virsh undefine apex-iso-verify
+fi
+}
+
+# Make sure a pre-existing iso-verify isn't there
+rm_apex_iso_verify
+
+# run an install from the iso
+# This streams a serial console to tcp port 3737 on localhost
+sudo virt-install -n apex-iso-verify -r 4096 --vcpus 4 --os-variant=rhel7 \
+ --accelerate -v --noautoconsole --nographics \
+ --disk path=/var/lib/libvirt/images/apex-iso-verify.qcow2,size=30,format=qcow2 \
+ -l $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso \
+ --extra-args 'console=ttyS0 console=ttyS0,115200n8 serial inst.ks=file:/iso-verify.ks inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64:/' \
+ --initrd-inject $BUILD_DIRECTORY/../ci/iso-verify.ks \
+ --serial tcp,host=:3737,protocol=raw
+
+# Attach to tcp port 3737 and echo the output to stdout
+# watch for a 5 min time out, a power off message or a tcp disconnect
+python << EOP
+#!/usr/bin/env python
+
+import sys
+import socket
+from time import sleep
+from time import time
+
+
+TCP_IP = '127.0.0.1'
+TCP_PORT = 3737
+BUFFER_SIZE = 1024
+
+try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((TCP_IP, TCP_PORT))
+except Exception, e:
+ print "Failed to connect to the iso-verofy vm's serial console"
+ print "this probably means that the VM failed to start"
+ raise e
+
+activity = time()
+data = s.recv(BUFFER_SIZE)
+last_data = data
+while time() - activity < 300:
+ try:
+ if data != last_data:
+ activity = time()
+ last_data = data
+ data = s.recv(BUFFER_SIZE)
+ sys.stdout.write(data)
+ if 'Powering off' in data:
+ break
+ sleep(.5)
+ except socket.error, e:
+ # for now assuming that the connection was closed
+ # which is good, means the vm finished installing
+ # printing the error output just in case we need to debug
+ print "VM console connection lost: %s" % msg
+ break
+s.close()
+
+if time() - activity > 300:
+ print "failing due to console inactivity"
+ exit(1)
+else:
+ print "Success!"
+EOP
+
+# save the python return code for after cleanup
+python_rc=$?
+
+# clean up
+rm_apex_iso_verify
+
+# Exit with the RC of the Python job
+exit $python_rc
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
diff --git a/jjb/apex/apex-snapshot-deploy.sh b/jjb/apex/apex-snapshot-deploy.sh
index 8274740c8..06c002319 100644
--- a/jjb/apex/apex-snapshot-deploy.sh
+++ b/jjb/apex/apex-snapshot-deploy.sh
@@ -22,7 +22,7 @@ echo
echo "Cleaning server"
pushd ci > /dev/null
-sudo CONFIG=../build/ LIB=../lib ./clean.sh
+sudo BASE=../build/ LIB=../lib ./clean.sh
popd > /dev/null
echo "Downloading latest snapshot properties file"
diff --git a/jjb/apex/apex-unit-test.sh b/jjb/apex/apex-unit-test.sh
index 5c4341741..12cb862b0 100755
--- a/jjb/apex/apex-unit-test.sh
+++ b/jjb/apex/apex-unit-test.sh
@@ -9,7 +9,7 @@ echo
pushd ci/ > /dev/null
-sudo CONFIG="${WORKSPACE}/build" LIB="${WORKSPACE}/lib" ./clean.sh
+sudo BASE="${WORKSPACE}/build" LIB="${WORKSPACE}/lib" ./clean.sh
./test.sh
popd
diff --git a/jjb/apex/apex-upload-artifact.sh b/jjb/apex/apex-upload-artifact.sh
index c2de7d70d..d046c119d 100755
--- a/jjb/apex/apex-upload-artifact.sh
+++ b/jjb/apex/apex-upload-artifact.sh
@@ -3,8 +3,13 @@ set -o errexit
set -o nounset
set -o pipefail
+if [ -z "$ARTIFACT_TYPE" ]; then
+ echo "ERROR: ARTIFACT_TYPE not provided...exiting"
+ exit 1
+fi
+
# log info to console
-echo "Uploading the Apex artifact. This could take some time..."
+echo "Uploading the Apex ${ARTIFACT_TYPE} artifact. This could take some time..."
echo "--------------------------------------------------------"
echo
@@ -18,7 +23,7 @@ echo "Cloning releng repository..."
[ -d releng ] && rm -rf releng
git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null
#this is where we import the siging key
-if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
+if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
source $WORKSPACE/releng/utils/gpg_import_key.sh
fi
@@ -45,32 +50,18 @@ echo "ISO signature Upload Complete!"
}
uploadiso () {
-# upload artifact and additional files to google storage
-gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log
-echo "ISO Upload Complete!"
-RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
-RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
-VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud; do # removed onos for danube
- RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-done
-SRPM_INSTALL_PATH=$BUILD_DIRECTORY
-SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
-VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud; do # removed onos for danube
- SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-done
+ gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log
+ echo "ISO Upload Complete!"
}
uploadrpm () {
-#This is where we upload the rpms
-for artifact in $RPM_LIST $SRPM_LIST; do
- echo "Uploading artifact: ${artifact}"
- gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log
- echo "Upload complete for ${artifact}"
-done
-gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log
-gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
+ for artifact in $RPM_LIST $SRPM_LIST; do
+ echo "Uploading artifact: ${artifact}"
+ gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log
+ echo "Upload complete for ${artifact}"
+ done
+ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log
+ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
}
uploadsnap () {
@@ -84,21 +75,43 @@ uploadsnap () {
echo "Upload complete for Snapshot"
}
-if echo $WORKSPACE | grep promote > /dev/null; then
- uploadsnap
-elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
+if gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
echo "Signing Key avaliable"
- signiso
+ SIGN_ARTIFACT="true"
+fi
+
+if [ "$ARTIFACT_TYPE" == 'snapshot' ]; then
+ uploadsnap
+elif [ "$ARTIFACT_TYPE" == 'iso' ]; then
+ if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
+ signiso
+ fi
uploadiso
- signrpm
+elif [ "$ARTIFACT_TYPE" == 'rpm' ]; then
+ RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
+ RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
+ VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
+ for pkg in common undercloud; do # removed onos for danube
+ RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
+ done
+ SRPM_INSTALL_PATH=$BUILD_DIRECTORY
+ SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
+ VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
+ for pkg in common undercloud; do # removed onos for danube
+ SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
+ done
+
+ if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
+ signrpm
+ fi
uploadrpm
else
- uploadiso
- uploadrpm
+ echo "ERROR: Unknown artifact type ${ARTIFACT_TYPE} to upload...exiting"
+ exit 1
fi
echo
echo "--------------------------------------------------------"
echo "Done!"
-echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"
+if [ "$ARTIFACT_TYPE" == 'iso' ]; then echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"; fi
+if [ "$ARTIFACT_TYPE" == 'rpm' ]; then echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"; fi
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index e7982ba55..7ca2e6edd 100644
--- a/jjb/apex/apex.yml
+++ b/jjb/apex/apex.yml
@@ -12,6 +12,7 @@
- 'apex-daily-{stream}'
- 'apex-csit-promote-daily-{stream}'
- 'apex-fdio-promote-daily-{stream}'
+ - 'apex-verify-iso-{stream}'
# stream: branch with - in place of / (eg. stable-arno)
# branch: branch (eg. stable/arno)
@@ -443,8 +444,64 @@
git-revision: false
same-node: true
block: true
+ - inject:
+ properties-content: ARTIFACT_TYPE=rpm
+ - 'apex-upload-artifact'
+ - trigger-builds:
+ - project: 'apex-verify-iso-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ git-revision: false
+ block: true
+ same-node: true
+ - inject:
+ properties-content: ARTIFACT_TYPE=iso
- 'apex-upload-artifact'
+# ISO verify job
+- job-template:
+ name: 'apex-verify-iso-{stream}'
+
+ # Job template for builds
+ #
+ # Required Variables:
+ # stream: branch with - in place of / (eg. stable)
+ # branch: branch (eg. stable)
+ node: '{daily-slave}'
+
+ disabled: false
+
+ concurrent: true
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - apex-parameter:
+ gs-pathname: '{gs-pathname}'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: "Used for overriding the GIT URL coming from parameters macro."
+
+ scm:
+ - git-scm
+
+ properties:
+ - logrotate-default
+ - build-blocker:
+ use-build-blocker: true
+ block-level: 'NODE'
+ blocking-jobs:
+ - 'apex-deploy.*'
+ - throttle:
+ max-per-node: 1
+ max-total: 10
+ option: 'project'
+
+ builders:
+ - 'apex-iso-verify'
+
- job-template:
name: 'apex-deploy-virtual-{scenario}-{stream}'
@@ -616,7 +673,7 @@
# 4.not used for release criteria or compliance,
# only to debug the dovetail tool bugs with apex
#- trigger-builds:
- # - project: 'dovetail-apex-{slave}-debug-{stream}'
+ # - project: 'dovetail-apex-{slave}-proposed_tests-{stream}'
# current-parameters: false
# predefined-parameters:
# DEPLOY_SCENARIO=os-nosdn-nofeature-ha
@@ -807,7 +864,7 @@
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
- trigger-builds:
- - project: 'apex-deploy-baremetal-os-odl_l3-fdio-noha-{stream}'
+ - project: 'apex-deploy-baremetal-os-odl_l3-fdio-ha-{stream}'
predefined-parameters: |
BUILD_DIRECTORY=apex-build-{stream}/.build
OPNFV_CLEAN=yes
@@ -819,7 +876,7 @@
- trigger-builds:
- project: 'functest-apex-{daily-slave}-daily-{stream}'
predefined-parameters:
- DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+ DEPLOY_SCENARIO=os-odl_l3-fdio-ha
block: true
same-node: true
block-thresholds:
@@ -829,7 +886,7 @@
- trigger-builds:
- project: 'yardstick-apex-{slave}-daily-{stream}'
predefined-parameters:
- DEPLOY_SCENARIO=os-odl_l3-fdio-noha
+ DEPLOY_SCENARIO=os-odl_l3-fdio-ha
block: true
same-node: true
block-thresholds:
@@ -1013,8 +1070,9 @@
same-node: true
- shell:
!include-raw-escape: ./apex-snapshot-create.sh
- - shell:
- !include-raw-escape: ./apex-upload-artifact.sh
+ - inject:
+ properties-content: ARTIFACT_TYPE=snapshot
+ - 'apex-upload-artifact'
# FDIO promote
- job-template:
@@ -1062,8 +1120,9 @@
same-node: true
- shell:
!include-raw-escape: ./apex-snapshot-create.sh
- - shell:
- !include-raw-escape: ./apex-upload-artifact.sh
+ - inject:
+ properties-content: ARTIFACT_TYPE=snapshot
+ - 'apex-upload-artifact'
- job-template:
name: 'apex-gs-clean-{stream}'
@@ -1147,6 +1206,13 @@
!include-raw: ./apex-workspace-cleanup.sh
- builder:
+ name: 'apex-iso-verify'
+ builders:
+ - shell:
+ !include-raw: ./apex-iso-verify.sh
+
+
+- builder:
name: 'apex-upload-artifact'
builders:
- shell:
diff --git a/jjb/armband/armband-ci-jobs.yml b/jjb/armband/armband-ci-jobs.yml
index 38a729de6..17d520419 100644
--- a/jjb/armband/armband-ci-jobs.yml
+++ b/jjb/armband/armband-ci-jobs.yml
@@ -56,8 +56,12 @@
slave-label: arm-pod3
installer: fuel
<<: *danube
- - arm-pod3-2:
- slave-label: arm-pod3-2
+ - arm-pod4:
+ slave-label: arm-pod4
+ installer: fuel
+ <<: *danube
+ - arm-virtual1:
+ slave-label: arm-virtual1
installer: fuel
<<: *danube
#--------------------------------
@@ -71,8 +75,12 @@
slave-label: arm-pod3
installer: fuel
<<: *master
- - arm-pod3-2:
- slave-label: arm-pod3-2
+ - arm-pod4:
+ slave-label: arm-pod4
+ installer: fuel
+ <<: *master
+ - arm-virtual1:
+ slave-label: arm-virtual1
installer: fuel
<<: *master
#--------------------------------
@@ -181,7 +189,7 @@
# 4.not used for release criteria or compliance,
# only to debug the dovetail tool bugs with arm pods
- trigger-builds:
- - project: 'dovetail-{installer}-{pod}-debug-{stream}'
+ - project: 'dovetail-{installer}-{pod}-proposed_tests-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
@@ -333,31 +341,31 @@
- trigger:
name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 1'
+ - timed: ''
- trigger:
name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 2'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 3'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 4'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 5'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-sfc-ha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 6'
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-master-trigger'
triggers:
- - timed: '0 2 * * 7'
+ - timed: ''
#--------------------------------------------------------------------
# Enea Armband CI Virtual Triggers running against danube branch
#--------------------------------------------------------------------
@@ -389,6 +397,71 @@
name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-danube-trigger'
triggers:
- timed: ''
+
+#--------------------------------------------------------------------
+# Enea Armband Non CI Virtual Triggers running against danube branch
+#--------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-danube-trigger'
+ triggers:
+ - timed: ''
+
+#--------------------------------------------------------------------
+# Enea Armband Non CI Virtual Triggers running against master branch
+#--------------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-master-trigger'
+ triggers:
+ - timed: ''
+
#----------------------------------------------------------
# Enea Armband POD 2 Triggers running against master branch
#----------------------------------------------------------
@@ -517,61 +590,61 @@
# Enea Armband POD 3 Triggers running against master branch (aarch64 slave)
#--------------------------------------------------------------------------
- trigger:
- name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod4-master-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-master-trigger'
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod4-master-trigger'
triggers:
- timed: ''
#--------------------------------------------------------------------------
# Enea Armband POD 3 Triggers running against danube branch (aarch64 slave)
#--------------------------------------------------------------------------
- trigger:
- name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod4-danube-trigger'
triggers:
- timed: ''
- trigger:
- name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-2-danube-trigger'
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod4-danube-trigger'
triggers:
- timed: ''
diff --git a/jjb/armband/armband-deploy.sh b/jjb/armband/armband-deploy.sh
index 2e5aa3924..e445e0850 100755
--- a/jjb/armband/armband-deploy.sh
+++ b/jjb/armband/armband-deploy.sh
@@ -33,10 +33,10 @@ fi
# set deployment parameters
export TMPDIR=${WORKSPACE}/tmpdir
-# arm-pod3-2 is an aarch64 jenkins slave for the same POD as the
+# arm-pod4 is an aarch64 jenkins slave for the same POD as the
# x86 jenkins slave arm-pod3; therefore we use the same pod name
# to deploy the pod from both jenkins slaves
-if [[ "${NODE_NAME}" == "arm-pod3-2" ]]; then
+if [[ "${NODE_NAME}" == "arm-pod4" ]]; then
NODE_NAME="arm-pod3"
fi
diff --git a/jjb/bottlenecks/bottlenecks-cleanup.sh b/jjb/bottlenecks/bottlenecks-cleanup.sh
index 052f72eef..04e620c7f 100644
--- a/jjb/bottlenecks/bottlenecks-cleanup.sh
+++ b/jjb/bottlenecks/bottlenecks-cleanup.sh
@@ -8,11 +8,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-BASEDIR=`dirname $0`
-
#clean up correlated dockers and their images
-bash ${BASEDIR}/docker_cleanup.sh -d bottlenecks --debug
-bash ${BASEDIR}/docker_cleanup.sh -d yardstick --debug
-bash ${BASEDIR}/docker_cleanup.sh -d kibana --debug
-bash ${BASEDIR}/docker_cleanup.sh -d elasticsearch --debug
-bash ${BASEDIR}/docker_cleanup.sh -d influxdb --debug \ No newline at end of file
+bash $WORKSPACE/docker/docker_cleanup.sh -d bottlenecks --debug
+bash $WORKSPACE/docker/docker_cleanup.sh -d yardstick --debug
+bash $WORKSPACE/docker/docker_cleanup.sh -d kibana --debug
+bash $WORKSPACE/docker/docker_cleanup.sh -d elasticsearch --debug
+bash $WORKSPACE/docker/docker_cleanup.sh -d influxdb --debug
diff --git a/jjb/bottlenecks/bottlenecks-project-jobs.yml b/jjb/bottlenecks/bottlenecks-project-jobs.yml
index a0abb9331..5dced2aad 100644
--- a/jjb/bottlenecks/bottlenecks-project-jobs.yml
+++ b/jjb/bottlenecks/bottlenecks-project-jobs.yml
@@ -70,8 +70,8 @@
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
builders:
- - bottlenecks-hello
- #- bottlenecks-unit-tests
+ #- bottlenecks-hello
+ - bottlenecks-unit-tests
- job-template:
name: 'bottlenecks-merge-{stream}'
@@ -206,10 +206,10 @@
# install python packages
easy_install -U setuptools
easy_install -U pip
- pip install -r requirements.txt
+ pip install -r $WORKSPACE/requirements/verify.txt
# unit tests
- /bin/bash $WORKSPACE/tests.sh
+ /bin/bash $WORKSPACE/verify.sh
deactivate
@@ -220,4 +220,4 @@
#!/bin/bash
set -o errexit
- echo "hello"
+ echo -e "Wellcome to Bottlenecks! \nMerge event is planning to support more functions! "
diff --git a/jjb/bottlenecks/docker_cleanup.sh b/jjb/bottlenecks/docker_cleanup.sh
deleted file mode 100644
index cfc8e8b0e..000000000
--- a/jjb/bottlenecks/docker_cleanup.sh
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-usage="Script to clear up dockers and their images.
-
-usage:
- bash $(basename "$0") [-h|--help] [-d|--docker <docker name>] [--debug]
-
-where:
- -h|--help show the help text
- -d|--docker specify dockers' name
- <docker name> keyword of dockers' name used to find dockers
- e.g. keyword "bottlenecks" to find "opnfv/bottlenecks:*"
- --debug print debug information with default false
-
-examples:
- $(basename "$0")
- $(basename "$0") -d bottlenecks --debug"
-
-clnup_debug=false
-
-while [[ $#>0 ]]; do
- clnup_docr="$1"
- case $clnup_docr in
- -h|--help)
- echo "$usage"
- exit 0
- shift
- ;;
- -d|--docker)
- docker_name="$2"
- shift
-
- if [[ $2 == "--debug" ]]; then
- clnup_debug=true
- shift
- fi
- ;;
- --debug)
- clnup_debug=true
- shift
- if [[ "$1" == "-d" || "$1" == "--docker" ]]; then
- docker_name="$2"
- shift
- fi
- ;;
- *)
- echo "unknow options $1 $2 $3"
- exit 1
- ;;
- esac
- shift
-done
-
-
-# check if docker name is empty
-if [[ $docker_name == "" ]]; then
- echo empty docker name
- exit 1
-fi
-
-# clean up dockers and their images with keyword in their names
-[[ $clnup_debug == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
-
-echo "$docker_name: docker containers/images cleaning up"
-
-dangling_images=($(docker images -f "dangling=true" | grep $docker_name | awk '{print $3}'))
-if [[ -n $dangling_images ]]; then
- echo "Removing $docker_name:<none> dangling images and their containers"
- docker images | head -1 && docker images | grep $dangling_images
- for image_id in "${dangling_images[@]}"; do
- echo "$docker_name: Removing dangling image $image_id"
- docker rmi -f $image_id >${redirect}
- done
-fi
-
-for image_id in "${dangling_images[@]}"; do
- if [[ -n $(docker ps -a | grep $image_id) ]]; then
- echo "$docker_name: Removing containers associated with dangling image: $image_id"
- docker ps -a | head -1 && docker ps -a | grep $image_id
- docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect}
- fi
-done
-
-if [[ -n $(docker ps -a | grep $docker_name) ]]; then
- echo "Removing existing $docker_name containers"
- docker ps -a | head -1 && docker ps -a | grep $docker_name
- docker ps -a | grep $docker_name | awk '{print $1}' | xargs docker rm -f >$redirect
-fi
-
-if [[ -n $(docker images | grep $docker_name) ]]; then
- echo "$docker_name: docker images to remove:"
- docker images | head -1 && docker images | grep $docker_name
- image_ids=($(docker images | grep $docker_name | awk '{print $3}'))
- for image_id in "${image_ids[@]}"; do
- echo "Removing docker image $docker_name:$tag..."
- docker rmi $image_id >$redirect
- done
-fi
diff --git a/jjb/compass4nfv/compass-ci-jobs.yml b/jjb/compass4nfv/compass-ci-jobs.yml
index 237f8944d..61845acdf 100644
--- a/jjb/compass4nfv/compass-ci-jobs.yml
+++ b/jjb/compass4nfv/compass-ci-jobs.yml
@@ -160,7 +160,7 @@
#dovetail only master by now, not sync with A/B/C branches
#here the stream means the SUT stream, dovetail stream is defined in its own job
- trigger-builds:
- - project: 'dovetail-compass-{pod}-debug-{stream}'
+ - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
diff --git a/jjb/compass4nfv/compass-dovetail-jobs.yml b/jjb/compass4nfv/compass-dovetail-jobs.yml
index d49d0ec5f..c321655d7 100644
--- a/jjb/compass4nfv/compass-dovetail-jobs.yml
+++ b/jjb/compass4nfv/compass-dovetail-jobs.yml
@@ -6,8 +6,8 @@
#----------------------------------
# BRANCH ANCHORS
#----------------------------------
- colorado: &colorado
- stream: colorado
+ danube: &danube
+ stream: danube
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
disabled: false
@@ -20,14 +20,14 @@
pod:
- baremetal:
slave-label: compass-baremetal
- os-version: 'trusty'
- <<: *colorado
+ os-version: 'xenial'
+ <<: *danube
#-----------------------------------
# scenarios
#-----------------------------------
scenario:
- 'os-nosdn-nofeature-ha':
- disabled: false
+ disabled: true
auto-trigger-name: 'compass-{scenario}-{pod}-weekly-{stream}-trigger'
jobs:
@@ -98,17 +98,6 @@
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
- trigger-builds:
- - project: 'dovetail-compass-{pod}-debug-weekly-{stream}'
- current-parameters: false
- predefined-parameters:
- DEPLOY_SCENARIO={scenario}
- block: true
- same-node: true
- block-thresholds:
- build-step-failure-threshold: 'never'
- failure-threshold: 'never'
- unstable-threshold: 'FAILURE'
- - trigger-builds:
- project: 'dovetail-compass-{pod}-proposed_tests-weekly-{stream}'
current-parameters: false
predefined-parameters:
@@ -192,13 +181,13 @@
- choice:
name: COMPASS_OPENSTACK_VERSION
choices:
- - 'mitaka'
+ - 'newton'
########################
# trigger macros
########################
- trigger:
- name: 'compass-os-nosdn-nofeature-ha-baremetal-weekly-colorado-trigger'
+ name: 'compass-os-nosdn-nofeature-ha-baremetal-weekly-danube-trigger'
triggers:
- timed: 'H H * * 0'
diff --git a/jjb/compass4nfv/compass-project-jobs.yml b/jjb/compass4nfv/compass-project-jobs.yml
index f962518e0..59482459e 100644
--- a/jjb/compass4nfv/compass-project-jobs.yml
+++ b/jjb/compass4nfv/compass-project-jobs.yml
@@ -125,7 +125,7 @@
description: "URL to Google Storage."
- string:
name: PPA_REPO
- default: "http://205.177.226.237:9999{ppa-pathname}"
+ default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
- string:
name: PPA_CACHE
default: "$WORKSPACE/work/repo/"
diff --git a/jjb/compass4nfv/compass-verify-jobs.yml b/jjb/compass4nfv/compass-verify-jobs.yml
index 14279e649..56f54d838 100644
--- a/jjb/compass4nfv/compass-verify-jobs.yml
+++ b/jjb/compass4nfv/compass-verify-jobs.yml
@@ -339,7 +339,7 @@
description: "URL to Google Storage."
- string:
name: PPA_REPO
- default: "http://205.177.226.237:9999{ppa-pathname}"
+ default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
- string:
name: PPA_CACHE
default: "$WORKSPACE/work/repo/"
diff --git a/jjb/cperf/cperf-ci-jobs.yml b/jjb/cperf/cperf-ci-jobs.yml
index 2742f08f7..dc209d644 100644
--- a/jjb/cperf/cperf-ci-jobs.yml
+++ b/jjb/cperf/cperf-ci-jobs.yml
@@ -126,14 +126,20 @@
undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
- sudo scp $INSTALLER_IP:/home/stack/stackrc /tmp/stackrc
- source /tmp/stackrc
+
+ sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc /tmp/overcloudrc
+ sudo chmod 755 /tmp/overcloudrc
+ source /tmp/overcloudrc
# robot suites need the ssh key to log in to controller nodes, so throwing it
# in tmp, and mounting /tmp as $HOME as far as robot is concerned
- sudo mkdir -p /tmp/.ssh
- sudo scp $INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
- sudo chmod -R 0600 /tmp/.ssh
+ sudo rm -rf /tmp/.ssh
+ sudo mkdir /tmp/.ssh
+ sudo chmod 0700 /tmp/.ssh
+ sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
+ sudo chown -R jenkins-ci:jenkins-ci /tmp/.ssh
+ # done with sudo. jenkins-ci is the user from this point
+ chmod 0600 /tmp/.ssh/id_rsa
# cbench requires the openflow drop test feature to be installed.
sshpass -p karaf ssh -o StrictHostKeyChecking=no \
@@ -144,7 +150,7 @@
docker pull opnfv/cperf:$DOCKER_TAG
- robot_cmd="pybot -e exclude -L TRACE \
+ robot_cmd="pybot -e exclude -L TRACE -d /tmp \
-v ODL_SYSTEM_1_IP:${SDN_CONTROLLER_IP} \
-v ODL_SYSTEM_IP:${SDN_CONTROLLER_IP} \
-v BUNDLEFOLDER:/opt/opendaylight \
@@ -156,7 +162,7 @@
-v of_port:6653"
robot_suite="/home/opnfv/repos/odl_test/csit/suites/openflowplugin/Performance/010_Cbench.robot"
- docker run -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
+ docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
- builder:
name: cperf-cleanup
diff --git a/jjb/daisy4nfv/daisy-daily-jobs.yml b/jjb/daisy4nfv/daisy-daily-jobs.yml
index c5d8e7e8b..aac76baa4 100644
--- a/jjb/daisy4nfv/daisy-daily-jobs.yml
+++ b/jjb/daisy4nfv/daisy-daily-jobs.yml
@@ -71,7 +71,6 @@
use-build-blocker: true
blocking-jobs:
- 'daisy-daily-.*'
- - 'daisy4nfv-(merge|verify)-.*'
block-level: 'NODE'
wrappers:
diff --git a/jjb/daisy4nfv/daisy-project-jobs.yml b/jjb/daisy4nfv/daisy-project-jobs.yml
index 52769ca88..e631ee9b9 100644
--- a/jjb/daisy4nfv/daisy-project-jobs.yml
+++ b/jjb/daisy4nfv/daisy-project-jobs.yml
@@ -54,6 +54,11 @@
enabled: true
max-total: 4
option: 'project'
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - '{installer}-daily-.*'
+ block-level: 'NODE'
scm:
- git-scm
@@ -138,12 +143,6 @@
enabled: true
max-total: 6
option: 'project'
- - build-blocker:
- use-build-blocker: true
- blocking-jobs:
- - '{installer}-daily-.*'
- - 'daisy4nfv-(merge|verify)-.*'
- block-level: 'NODE'
scm:
- git-scm
diff --git a/jjb/daisy4nfv/daisy4nfv-basic.sh b/jjb/daisy4nfv/daisy4nfv-basic.sh
index 04b9b7bfa..87f5482e0 100755
--- a/jjb/daisy4nfv/daisy4nfv-basic.sh
+++ b/jjb/daisy4nfv/daisy4nfv-basic.sh
@@ -4,4 +4,3 @@ echo "--------------------------------------------------------"
echo "This is diasy4nfv basic job!"
echo "--------------------------------------------------------"
-sudo rm -rf /home/jenkins-ci/opnfv/slave_root/workspace/daisy4nfv-verify-build-master/*
diff --git a/jjb/daisy4nfv/daisy4nfv-build.sh b/jjb/daisy4nfv/daisy4nfv-build.sh
index 375d80733..925f68e18 100755
--- a/jjb/daisy4nfv/daisy4nfv-build.sh
+++ b/jjb/daisy4nfv/daisy4nfv-build.sh
@@ -1,5 +1,9 @@
#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
echo "--------------------------------------------------------"
echo "This is diasy4nfv build job!"
echo "--------------------------------------------------------"
diff --git a/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml b/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
index 95e72e550..9e7b867af 100644
--- a/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
+++ b/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
@@ -29,7 +29,7 @@
- 'build':
slave-label: 'opnfv-build-centos'
- 'deploy-virtual':
- slave-label: 'opnfv-build-centos'
+ slave-label: 'daisy-virtual'
#####################################
# jobs
#####################################
@@ -148,8 +148,7 @@
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - '{alias}-(merge|verify)-.*'
- - '{project}-daily-.*'
+ - '{alias}-merge-(master|danube)'
block-level: 'NODE'
scm:
diff --git a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
index 9f44d99fb..a0ec2ebd7 100644
--- a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
+++ b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
@@ -1,10 +1,7 @@
- project:
name: 'daisy4nfv-verify-jobs'
-
project: 'daisy'
-
installer: 'daisy'
-
##########################################################
# use alias to keep the jobs'name existed alread unchanged
##########################################################
@@ -26,7 +23,9 @@
# patch verification phases
#####################################
phase:
- - 'build':
+ - unit:
+ slave-label: 'opnfv-build'
+ - build:
slave-label: 'opnfv-build-centos'
#####################################
# jobs
@@ -39,29 +38,22 @@
#####################################
- job-template:
name: '{alias}-verify-{stream}'
-
project-type: multijob
-
disabled: false
-
concurrent: true
-
properties:
- logrotate-default
- throttle:
enabled: true
max-total: 4
option: 'project'
-
scm:
- git-scm
-
wrappers:
- ssh-agent-wrapper
- timeout:
timeout: 360
fail: true
-
triggers:
- gerrit:
server-name: 'gerrit.opnfv.org'
@@ -108,6 +100,15 @@
- description-setter:
description: "Built on $NODE_NAME"
- multijob:
+ name: unit
+ condition: SUCCESSFUL
+ projects:
+ - name: '{alias}-verify-{name}-{stream}'
+ current-parameters: true
+ node-parameters: false
+ kill-phase-on: FAILURE
+ abort-all-job: true
+ - multijob:
name: build
condition: SUCCESSFUL
projects:
@@ -124,33 +125,21 @@
- job-template:
name: '{alias}-verify-{phase}-{stream}'
-
disabled: '{obj:disabled}'
-
concurrent: true
-
properties:
- logrotate-default
- throttle:
enabled: true
max-total: 6
option: 'project'
- - build-blocker:
- use-build-blocker: true
- blocking-jobs:
- - '{alias}-(merge|verify)-.*'
- - '{installer}-daily-.*'
- block-level: 'NODE'
-
scm:
- git-scm
-
wrappers:
- ssh-agent-wrapper
- timeout:
timeout: 360
fail: true
-
parameters:
- project-parameter:
project: '{project}'
@@ -158,7 +147,6 @@
- '{slave-label}-defaults'
- '{alias}-verify-defaults':
gs-pathname: '{gs-pathname}'
-
builders:
- description-setter:
description: "Built on $NODE_NAME"
@@ -177,6 +165,16 @@
- shell:
!include-raw: ./daisy4nfv-workspace-cleanup.sh
+- builder:
+ name: daisy-verify-unit-macro
+ builders:
+ - shell: |
+ #!/bin/bash
+ set -o errexit
+ set -o pipefail
+ set -o xtrace
+ tox -e py27
+
#####################################
# parameter macros
#####################################
diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index c677ef96e..807d436da 100644
--- a/jjb/doctor/doctor.yml
+++ b/jjb/doctor/doctor.yml
@@ -112,7 +112,7 @@
# functest-suite-parameter
- string:
name: FUNCTEST_SUITE_NAME
- default: '{project}'
+ default: 'doctor-notification'
- string:
name: TESTCASE_OPTIONS
default: '-e INSPECTOR_TYPE={inspector} -e PROFILER_TYPE={profiler} -v $WORKSPACE:/home/opnfv/repos/doctor'
diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml
index b65e6d5ef..4998278c8 100644
--- a/jjb/dovetail/dovetail-ci-jobs.yml
+++ b/jjb/dovetail/dovetail-ci-jobs.yml
@@ -137,11 +137,16 @@
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
+ - arm-virtual1:
+ slave-label: '{pod}'
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
#--------------------------------
testsuite:
- 'debug'
- - 'proposed_tests'
- 'compliance_set'
+ - 'proposed_tests'
jobs:
- 'dovetail-{SUT}-{pod}-{testsuite}-{stream}'
@@ -169,6 +174,7 @@
- timeout:
timeout: 180
abort: true
+ - fix-workspace-permissions
triggers:
- '{auto-trigger-name}'
@@ -208,9 +214,6 @@
- 'dovetail-cleanup'
- 'dovetail-run'
- wrappers:
- - fix-workspace-permissions
-
publishers:
- archive:
artifacts: 'results/**/*'
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index 5161a3c7c..5f462e9c3 100755
--- a/jjb/dovetail/dovetail-run.sh
+++ b/jjb/dovetail/dovetail-run.sh
@@ -32,10 +32,11 @@ if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FOR
sudo iptables -I FORWARD -j RETURN
fi
+releng_repo=${WORKSPACE}/releng
+[ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
+git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
+
if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
- releng_repo=${WORKSPACE}/releng
- [ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
- git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
fi
@@ -47,16 +48,57 @@ else
exit 1
fi
+sudo pip install virtualenv
+
+cd ${releng_repo}/modules
+sudo virtualenv venv
+source venv/bin/activate
+sudo pip install -e ./ >/dev/null
+
+if [[ ${INSTALLER_TYPE} == compass ]]; then
+ options="-u root -p root"
+elif [[ ${INSTALLER_TYPE} == fuel ]]; then
+ options="-u root -p r00tme"
+else
+ echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
+ echo "HA test cases may not run properly."
+fi
+
+pod_file_dir="/home/opnfv/dovetail/userconfig"
+cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} -i ${INSTALLER_IP} ${options} -f ${pod_file_dir}/pod.yaml"
+echo ${cmd}
+${cmd}
+
+deactivate
+
+cd ${WORKSPACE}
+
+if [ -f ${pod_file_dir}/pod.yaml ]; then
+ echo "file ${pod_file_dir}/pod.yaml:"
+ cat ${pod_file_dir}/pod.yaml
+else
+ echo "Error: There doesn't exist file ${pod_file_dir}/pod.yaml."
+ echo "HA test cases may not run properly."
+fi
+
+ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+
+if [ "$INSTALLER_TYPE" == "fuel" ]; then
+ echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+ sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${pod_file_dir}/id_rsa
+fi
+
opts="--privileged=true -id"
results_envs="-v /var/run/docker.sock:/var/run/docker.sock \
-v /home/opnfv/dovetail/results:/home/opnfv/dovetail/results"
openrc_volume="-v ${OPENRC}:${OPENRC}"
+userconfig_volume="-v ${pod_file_dir}:${pod_file_dir}"
# Pull the image with correct tag
echo "Dovetail: Pulling image opnfv/dovetail:${DOCKER_TAG}"
docker pull opnfv/dovetail:$DOCKER_TAG >$redirect
-cmd="docker run ${opts} ${results_envs} ${openrc_volume} \
+cmd="docker run ${opts} ${results_envs} ${openrc_volume} ${userconfig_volume} \
${sshkey} opnfv/dovetail:${DOCKER_TAG} /bin/bash"
echo "Dovetail: running docker run command: ${cmd}"
${cmd} >${redirect}
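
The new block above installs the releng modules into a virtualenv, generates a pod.yaml describing the deployment with utils/create_pod_file.py, and mounts that directory into the dovetail container so the HA test cases can find it. Condensed to its essentials (installer branching omitted; compass credentials shown, fuel uses r00tme as in the script):

    releng_repo=${WORKSPACE}/releng
    pod_file_dir="/home/opnfv/dovetail/userconfig"

    cd ${releng_repo}/modules
    sudo virtualenv venv && source venv/bin/activate
    sudo pip install -e ./ >/dev/null
    sudo python ${releng_repo}/utils/create_pod_file.py \
        -t ${INSTALLER_TYPE} -i ${INSTALLER_IP} -u root -p root \
        -f ${pod_file_dir}/pod.yaml
    deactivate

    # pod.yaml is exposed to the container as an extra volume
    userconfig_volume="-v ${pod_file_dir}:${pod_file_dir}"
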
diff --git a/jjb/dovetail/dovetail-weekly-jobs.yml b/jjb/dovetail/dovetail-weekly-jobs.yml
index 7b3ede902..700657d68 100644
--- a/jjb/dovetail/dovetail-weekly-jobs.yml
+++ b/jjb/dovetail/dovetail-weekly-jobs.yml
@@ -10,8 +10,8 @@
dovetail-branch: '{stream}'
gs-pathname: ''
docker-tag: 'latest'
- colorado: &colorado
- stream: colorado
+ danube: &danube
+ stream: danube
branch: 'stable/{stream}'
dovetail-branch: master
gs-pathname: '/{stream}'
@@ -28,40 +28,40 @@
pod:
# - baremetal:
# slave-label: apex-baremetal
-# sut: apex
-# <<: *colorado
+# SUT: apex
+# <<: *danube
- baremetal:
slave-label: compass-baremetal
- sut: compass
- <<: *colorado
+ SUT: compass
+ <<: *danube
# - baremetal:
# slave-label: fuel-baremetal
-# sut: fuel
-# <<: *master
+# SUT: fuel
+# <<: *danube
# - baremetal:
# slave-label: joid-baremetal
-# sut: joid
-# <<: *colorado
+# SUT: joid
+# <<: *danube
testsuite:
- 'debug'
- - 'proposed_tests'
- 'compliance_set'
+ - 'proposed_tests'
loop:
- 'weekly':
- job-timeout: 60
+ job-timeout: 180
jobs:
- - 'dovetail-{sut}-{pod}-{testsuite}-{loop}-{stream}'
+ - 'dovetail-{SUT}-{pod}-{testsuite}-{loop}-{stream}'
################################
# job template
################################
- job-template:
- name: 'dovetail-{sut}-{pod}-{testsuite}-{loop}-{stream}'
+ name: 'dovetail-{SUT}-{pod}-{testsuite}-{loop}-{stream}'
- disabled: false
+ disabled: true
concurrent: true
@@ -78,12 +78,13 @@
- timeout:
timeout: '{job-timeout}'
abort: true
+ - fix-workspace-permissions
parameters:
- project-parameter:
project: '{project}'
branch: '{dovetail-branch}'
- - '{sut}-defaults'
+ - '{SUT}-defaults'
- '{slave-label}-defaults'
- string:
name: DEPLOY_SCENARIO
@@ -114,9 +115,6 @@
- 'dovetail-cleanup'
- 'dovetail-run'
- wrappers:
- - fix-workspace-permissions
-
publishers:
- archive:
artifacts: 'results/**/*'
diff --git a/jjb/functest/functest-daily-jobs.yml b/jjb/functest/functest-daily-jobs.yml
index 0a2a2197c..3c04a4ac0 100644
--- a/jjb/functest/functest-daily-jobs.yml
+++ b/jjb/functest/functest-daily-jobs.yml
@@ -158,7 +158,11 @@
slave-label: '{pod}'
installer: fuel
<<: *master
- - arm-pod3-2:
+ - arm-pod4:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *master
+ - arm-virtual1:
slave-label: '{pod}'
installer: fuel
<<: *master
@@ -190,7 +194,11 @@
slave-label: '{pod}'
installer: fuel
<<: *danube
- - arm-pod3-2:
+ - arm-pod4:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *danube
+ - arm-virtual1:
slave-label: '{pod}'
installer: fuel
<<: *danube
diff --git a/jjb/global/slave-params.yml b/jjb/global/slave-params.yml
index 1905a098a..fad06b077 100644
--- a/jjb/global/slave-params.yml
+++ b/jjb/global/slave-params.yml
@@ -747,15 +747,33 @@
default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
description: 'Base URI to the configuration directory'
- parameter:
- name: 'arm-pod3-2-defaults'
+ name: 'arm-pod4-defaults'
parameters:
- node:
name: SLAVE_NAME
description: 'Slave name on Jenkins'
allowed-slaves:
- - arm-pod3-2
+ - arm-pod4
default-slaves:
- - arm-pod3-2
+ - arm-pod4
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+- parameter:
+ name: 'arm-virtual1-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-virtual1
+ default-slaves:
+ - arm-virtual1
- string:
name: GIT_BASE
default: https://gerrit.opnfv.org/gerrit/$PROJECT
diff --git a/jjb/joid/joid-daily-jobs.yml b/jjb/joid/joid-daily-jobs.yml
index 7dc718950..13ea9b308 100644
--- a/jjb/joid/joid-daily-jobs.yml
+++ b/jjb/joid/joid-daily-jobs.yml
@@ -164,7 +164,7 @@
# 4.not used for release criteria or compliance,
# only to debug the dovetail tool bugs with joid
#- trigger-builds:
- # - project: 'dovetail-joid-{pod}-debug-{stream}'
+ # - project: 'dovetail-joid-{pod}-proposed_tests-{stream}'
# current-parameters: false
# predefined-parameters:
# DEPLOY_SCENARIO={scenario}
diff --git a/jjb/kvmfornfv/kvmfornfv.yml b/jjb/kvmfornfv/kvmfornfv.yml
index 8d607f985..9624778f8 100644
--- a/jjb/kvmfornfv/kvmfornfv.yml
+++ b/jjb/kvmfornfv/kvmfornfv.yml
@@ -11,7 +11,7 @@
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: false
+ disabled: true
#####################################
# patch verification phases
#####################################
diff --git a/jjb/opera/opera-daily-jobs.yml b/jjb/opera/opera-daily-jobs.yml
index 5d2cc03f3..596d3771f 100644
--- a/jjb/opera/opera-daily-jobs.yml
+++ b/jjb/opera/opera-daily-jobs.yml
@@ -6,30 +6,32 @@
#####################################
# branch definitions
#####################################
- stream:
- - master:
- branch: '{stream}'
- gs-pathname: ''
- disabled: false
+ master: &master
+ stream: master
+ branch: '{stream}'
+ gs-pathname: ''
+ disabled: false
#####################################
-# patch verification phases
+# pod definitions
#####################################
- phase:
- - 'basic'
- - 'deploy'
+ pod:
+ - virtual:
+ slave-label: 'huawei-virtual7'
+ os-version: 'xenial'
+ <<: *master
#####################################
# jobs
#####################################
jobs:
- - 'opera-daily-{stream}'
- - 'opera-daily-{phase}-{stream}'
+ - 'opera-{pod}-daily-{stream}'
+
#####################################
# job templates
#####################################
- job-template:
- name: 'opera-daily-{stream}'
+ name: 'opera-{pod}-daily-{stream}'
project-type: multijob
@@ -62,86 +64,35 @@
- project-parameter:
project: '{project}'
branch: '{branch}'
- - 'huawei-virtual7-defaults'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: os-nosdn-openo-ha
+ - '{slave-label}-defaults'
builders:
- description-setter:
description: "Built on $NODE_NAME"
- multijob:
- name: basic
+ name: deploy
condition: SUCCESSFUL
projects:
- - name: 'opera-daily-basic-{stream}'
- current-parameters: true
+ - name: 'compass-deploy-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters: |
+ DEPLOY_SCENARIO=os-nosdn-openo-ha
+ COMPASS_OS_VERSION=xenial
node-parameters: true
kill-phase-on: FAILURE
abort-all-job: true
- multijob:
- name: deploy
+ name: functest
condition: SUCCESSFUL
projects:
- - name: 'compass-deploy-virtual-daily-{stream}'
+ - name: 'functest-compass-{pod}-suite-{stream}'
current-parameters: false
predefined-parameters: |
DEPLOY_SCENARIO=os-nosdn-openo-ha
- COMPASS_OS_VERSION=xenial
+ FUNCTEST_SUITE_NAME=opera_vims
node-parameters: true
- kill-phase-on: FAILURE
+ kill-phase-on: NEVER
abort-all-job: true
-# - multijob:
-# name: functest
-# condition: SUCCESSFUL
-# projects:
-# - name: 'functest-compass-baremetal-suite-{stream}'
-# current-parameters: false
-# predefined-parameters:
-# FUNCTEST_SUITE_NAME=opera
-# node-parameters: true
-# kill-phase-on: NEVER
-# abort-all-job: true
-
-- job-template:
- name: 'opera-daily-{phase}-{stream}'
-
- disabled: '{obj:disabled}'
-
- concurrent: true
-
- properties:
- - logrotate-default
- - throttle:
- enabled: true
- max-per-node: 1
- option: 'project'
-
- scm:
- - git-scm
-
- wrappers:
- - ssh-agent-wrapper
- - timeout:
- timeout: 120
- fail: true
-
- builders:
- - description-setter:
- description: "Built on $NODE_NAME"
- - '{project}-daily-{phase}-macro'
-
-#####################################
-# builder macros
-#####################################
-- builder:
- name: 'opera-daily-basic-macro'
- builders:
- - shell: |
- #!/bin/bash
- echo "Hello world!"
-
-- builder:
- name: 'opera-daily-deploy-macro'
- builders:
- - shell: |
- #!/bin/bash
- echo "Hello world!"
-
diff --git a/jjb/releng/opnfv-docker-arm.yml b/jjb/releng/opnfv-docker-arm.yml
index ba540ed76..417fc702c 100644
--- a/jjb/releng/opnfv-docker-arm.yml
+++ b/jjb/releng/opnfv-docker-arm.yml
@@ -18,6 +18,11 @@
receivers: >
cristina.pauna@enea.com
alexandru.avadanii@enea.com
+ dovetail-arm-receivers: &dovetail-arm-receivers
+ receivers: >
+ cristina.pauna@enea.com
+ alexandru.avadanii@enea.com
+ alexandru.nemes@enea.com
other-receivers: &other-receivers
receivers: ''
@@ -26,6 +31,9 @@
- 'functest':
<<: *master
<<: *functest-arm-receivers
+ - 'dovetail':
+ <<: *master
+ <<: *dovetail-arm-receivers
# projects with jobs for stable
jobs:
diff --git a/jjb/releng/opnfv-docker.sh b/jjb/releng/opnfv-docker.sh
index 9bd711bc6..2aa52adc5 100644
--- a/jjb/releng/opnfv-docker.sh
+++ b/jjb/releng/opnfv-docker.sh
@@ -17,14 +17,16 @@ echo "Starting opnfv-docker for $DOCKER_REPO_NAME ..."
echo "--------------------------------------------------------"
echo
-
-if [[ -n $(ps -ef|grep 'docker build'|grep -v grep) ]]; then
- echo "There is already another build process in progress:"
- echo $(ps -ef|grep 'docker build'|grep -v grep)
- # Abort this job since it will collide and might mess up the current one.
- echo "Aborting..."
- exit 1
-fi
+count=30 # docker build jobs might take up to ~30 min
+while [[ -n `ps -ef|grep 'docker build'|grep -v grep` ]]; do
+ echo "Build in progress. Waiting..."
+ sleep 60
+ count=$(( $count - 1 ))
+ if [ $count -eq 0 ]; then
+ echo "Timeout. Aborting..."
+ exit 1
+ fi
+done
# Remove previous running containers if exist
if [[ -n "$(docker ps -a | grep $DOCKER_REPO_NAME)" ]]; then
@@ -73,14 +75,11 @@ echo "Current branch: $BRANCH"
if [[ "$BRANCH" == "master" ]]; then
DOCKER_TAG="latest"
+elif [[ -n "${RELEASE_VERSION-}" ]]; then
+ DOCKER_TAG=${BRANCH##*/}.${RELEASE_VERSION}
+ # e.g. danube.1.0, danube.2.0, danube.3.0
else
- if [[ -n "${RELEASE_VERSION-}" ]]; then
- release=${BRANCH##*/}
- DOCKER_TAG=${release}.${RELEASE_VERSION}
- # e.g. colorado.1.0, colorado.2.0, colorado.3.0
- else
- DOCKER_TAG="stable"
- fi
+ DOCKER_TAG="stable"
fi
# Start the build
@@ -88,6 +87,9 @@ echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG"
echo "--------------------------------------------------------"
echo
if [[ $DOCKER_REPO_NAME == *"dovetail"* ]]; then
+ if [[ -n "${RELEASE_VERSION-}" ]]; then
+ DOCKER_TAG=${RELEASE_VERSION}
+ fi
cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG -f $DOCKERFILE ."
else
cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH
diff --git a/jjb/releng/testapi-automate.yml b/jjb/releng/testapi-automate.yml
index dd76538a3..8f3ae0c23 100644
--- a/jjb/releng/testapi-automate.yml
+++ b/jjb/releng/testapi-automate.yml
@@ -258,7 +258,8 @@
name: 'testapi-automate-docker-deploy-macro'
builders:
- shell: |
- bash ./jjb/releng/testapi-docker-deploy.sh
+ echo 'disable TestAPI update temporarily due to frequent change'
+# bash ./jjb/releng/testapi-docker-deploy.sh
################################
# job publishers
diff --git a/jjb/securedlab/check-jinja2.sh b/jjb/securedlab/check-jinja2.sh
new file mode 100755
index 000000000..be4d951ed
--- /dev/null
+++ b/jjb/securedlab/check-jinja2.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+set +x
+set -o errexit
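+# run utils/generate_config.py for every (non-zte) lab pod.yaml against every
+# jinja2 template under installers/, so a broken template fails the job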
+for lab_configs in $(find labs/ -name 'pod.yaml' | grep -v zte); do
+ while IFS= read -r jinja_templates; do
+ echo "./utils/generate_config.py -y $lab_configs -j $jinja_templates"
+ ./utils/generate_config.py -y $lab_configs -j $jinja_templates
+ done < <(find installers/ -name '*.jinja2')
+done
diff --git a/jjb/securedlab/check-jinja2.yml b/jjb/securedlab/check-jinja2.yml
new file mode 100644
index 000000000..1e85536e7
--- /dev/null
+++ b/jjb/securedlab/check-jinja2.yml
@@ -0,0 +1,80 @@
+########################
+# Job configuration to validate jinja2 files
+########################
+- project:
+
+ name: validate-templates
+
+ project: 'securedlab'
+
+ jobs:
+ - 'validate-jinja2-templates-{stream}'
+
+ stream:
+ - master:
+ branch: '{stream}'
+ disabled: false
+ - danube:
+ branch: 'stable/{stream}'
+ disabled: false
+
+########################
+# job templates
+########################
+
+- job-template:
+ name: 'validate-jinja2-templates-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: true
+
+ parameters:
+ - project-parameter:
+ project: $GERRIT_PROJECT
+ branch: '{branch}'
+ - node:
+ name: SLAVE_NAME
+        description: Slave to execute jinja2 template test
+ default-slaves:
+ - lf-build1
+ allowed-multiselect: true
+ ignore-offline-nodes: true
+
+ scm:
+ - git-scm-gerrit
+
+ triggers:
+ - gerrit:
+ server-name: 'gerrit.opnfv.org'
+ trigger-on:
+ - patchset-created-event:
+ exclude-drafts: 'false'
+ exclude-trivial-rebase: 'false'
+ exclude-no-code-change: 'false'
+ - draft-published-event
+ - comment-added-contains-event:
+ comment-contains-value: 'recheck'
+ - comment-added-contains-event:
+ comment-contains-value: 'reverify'
+ projects:
+ - project-compare-type: 'REG_EXP'
+ project-pattern: '{project}'
+ branches:
+ - branch-compare-type: 'ANT'
+ branch-pattern: '**/{branch}'
+ file-paths:
+ - compare-type: ANT
+ pattern: 'utils/generate_config.yml'
+ - compare-type: ANT
+ pattern: '**/*.jinja2'
+ - compare-type: ANT
+ pattern: '**/*.yaml'
+ builders:
+ - check-jinja
+
+- builder:
+ name: check-jinja
+ builders:
+ - shell:
+ !include-raw-escape: ./check-jinja2.sh
diff --git a/jjb/test-requirements.txt b/jjb/test-requirements.txt
new file mode 100644
index 000000000..6b700dcfc
--- /dev/null
+++ b/jjb/test-requirements.txt
@@ -0,0 +1 @@
+jenkins-job-builder
diff --git a/jjb/xci/bifrost-cleanup-job.yml b/jjb/xci/bifrost-cleanup-job.yml
index d4b2157da..d5a444d09 100644
--- a/jjb/xci/bifrost-cleanup-job.yml
+++ b/jjb/xci/bifrost-cleanup-job.yml
@@ -69,7 +69,7 @@
while [[ $try_to_rm -lt 6 ]]; do
gsutil -m rm -r $BIFROST_GS_URL && _exitcode=$? && break
_exitcode=$?
- echo "gsutil rm failed! Trying again... (attempt #$i)"
+ echo "gsutil rm failed! Trying again... (attempt #$try_to_rm)"
let try_to_rm += 1
# Give it some time...
sleep 10
diff --git a/jjb/xci/bifrost-periodic-jobs.yml b/jjb/xci/bifrost-periodic-jobs.yml
new file mode 100644
index 000000000..3e9ff678e
--- /dev/null
+++ b/jjb/xci/bifrost-periodic-jobs.yml
@@ -0,0 +1,152 @@
+- project:
+ project: 'releng'
+
+ name: 'bifrost-periodic'
+#--------------------------------
+# Branch Anchors
+#--------------------------------
+# the versions stated here default to branches, which are later
+# used for checking out the branches, pulling in the head of the branch.
+ master: &master
+ stream: master
+ openstack-bifrost-version: '{stream}'
+ opnfv-releng-version: 'master'
+ gs-pathname: ''
+ ocata: &ocata
+ stream: ocata
+ openstack-bifrost-version: 'stable/{stream}'
+ opnfv-releng-version: 'master'
+ gs-pathname: '/{stream}'
+#--------------------------------
+# XCI PODs
+#--------------------------------
+ pod:
+ - virtual:
+ <<: *master
+ - virtual:
+ <<: *ocata
+#--------------------------------
+# Supported Distros
+#--------------------------------
+ distro:
+ - 'xenial':
+ disabled: false
+ slave-label: xci-xenial-virtual
+ dib-os-release: 'xenial'
+ dib-os-element: 'ubuntu-minimal'
+ dib-os-packages: 'vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables'
+ extra-dib-elements: 'openssh-server'
+ - 'centos7':
+ disabled: true
+ slave-label: xci-centos7-virtual
+ dib-os-release: '7'
+ dib-os-element: 'centos7'
+ dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
+ extra-dib-elements: 'openssh-server'
+ - 'suse':
+ disabled: true
+ slave-label: xci-suse-virtual
+ dib-os-release: '42.2'
+ dib-os-element: 'opensuse-minimal'
+ dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
+ extra-dib-elements: 'openssh-server'
+
+#--------------------------------
+# jobs
+#--------------------------------
+ jobs:
+ - 'bifrost-provision-{pod}-{distro}-periodic-{stream}'
+
+#--------------------------------
+# job templates
+#--------------------------------
+- job-template:
+ name: 'bifrost-provision-{pod}-{distro}-periodic-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: false
+
+ properties:
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - '^xci-os.*'
+ - '^xci-deploy.*'
+ - '^xci-functest.*'
+ - '^bifrost-.*periodic.*'
+ - '^osa-.*periodic.*'
+ block-level: 'NODE'
+ - logrotate-default
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{opnfv-releng-version}'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ - string:
+ name: XCI_FLAVOR
+ default: 'ha'
+ - string:
+ name: OPENSTACK_BIFROST_VERSION
+ default: '{openstack-bifrost-version}'
+ - string:
+ name: OPNFV_RELENG_VERSION
+ default: '{opnfv-releng-version}'
+ - string:
+ name: DISTRO
+ default: '{distro}'
+ - string:
+ name: DIB_OS_RELEASE
+ default: '{dib-os-release}'
+ - string:
+ name: DIB_OS_ELEMENT
+ default: '{dib-os-element}'
+ - string:
+ name: DIB_OS_PACKAGES
+ default: '{dib-os-packages}'
+ - string:
+ name: EXTRA_DIB_ELEMENTS
+ default: '{extra-dib-elements}'
+ - string:
+ name: CLEAN_DIB_IMAGES
+ default: 'true'
+ - label:
+ name: SLAVE_LABEL
+ default: '{slave-label}'
+ - string:
+ name: ANSIBLE_VERBOSITY
+ default: ''
+ - string:
+ name: XCI_LOOP
+ default: 'periodic'
+
+ wrappers:
+ - fix-workspace-permissions
+
+ scm:
+ - git-scm
+
+ # trigger is disabled until we know which jobs we will have
+ # and adjust stuff accordingly
+ triggers:
+ - timed: '#@midnight'
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME - Flavor: $XCI_FLAVOR"
+ - 'bifrost-provision-builder'
+
+#---------------------------
+# builder macros
+#---------------------------
+- builder:
+ name: bifrost-provision-builder
+ builders:
+ - shell:
+ !include-raw: ./bifrost-provision.sh
diff --git a/jjb/xci/xci-provision.sh b/jjb/xci/bifrost-provision.sh
index 47a96767f..4724c2ee5 100755
--- a/jjb/xci/xci-provision.sh
+++ b/jjb/xci/bifrost-provision.sh
@@ -70,12 +70,8 @@ cd /opt/releng && sudo git checkout --quiet $OPNFV_RELENG_VERSION
echo "xci: using releng commit"
git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>'
-# this script will be reused for promoting bifrost versions and using
-# promoted bifrost versions as part of xci daily.
-USE_PROMOTED_VERSIONS=${USE_PROMOTED_VERSIONS:-false}
-if [ $USE_PROMOTED_VERSIONS = "true" ]; then
- echo "TBD: Will use the promoted versions of openstack/opnfv projects"
-fi
+# source flavor vars
+source "$WORKSPACE/prototypes/xci/config/${XCI_FLAVOR}-vars"
# combine opnfv and upstream scripts/playbooks
sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
@@ -84,7 +80,7 @@ sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
cd /opt/bifrost
sudo -E ./scripts/destroy-env.sh
-# provision 6 VMs; xcimaster, controller00, controller01, controller02, compute00, and compute01
+# provision VMs for the flavor
cd /opt/bifrost
sudo -E ./scripts/bifrost-provision.sh
@@ -94,11 +90,20 @@ source env-vars
ironic node-list
virsh list
+echo "OpenStack nodes are provisioned!"
+# here we have to do something in order to capture the working sha1
+# hardcoding stuff for the time being
+
+cd /opt/bifrost
+BIFROST_GIT_SHA1=$(git rev-parse HEAD)
+
# log some info
echo -e "\n"
echo "***********************************************************************"
+echo "* BIFROST SHA1 TO PIN *"
echo "* *"
-echo "* OpenStack nodes are provisioned! *"
+echo " $BIFROST_GIT_SHA1"
echo "* *"
echo "***********************************************************************"
+
echo -e "\n"
diff --git a/jjb/xci/bifrost-verify-jobs.yml b/jjb/xci/bifrost-verify-jobs.yml
index b93456ee2..806829620 100644
--- a/jjb/xci/bifrost-verify-jobs.yml
+++ b/jjb/xci/bifrost-verify-jobs.yml
@@ -55,7 +55,7 @@
- defaults:
name: verify_vm_defaults
test-vm-num-nodes: '3'
- test-vm-node-names: 'xcimaster controller00 compute00'
+ test-vm-node-names: 'opnfv controller00 compute00'
vm-domain-type: 'kvm'
vm-cpu: '2'
vm-disk: '30'
@@ -140,6 +140,9 @@
- string:
name: ANSIBLE_VERBOSITY
default: '-vvvv'
+ - string:
+ name: XCI_LOOP
+ default: 'verify'
scm:
- git:
diff --git a/jjb/xci/osa-periodic-jobs.yml b/jjb/xci/osa-periodic-jobs.yml
new file mode 100644
index 000000000..56a4b18b4
--- /dev/null
+++ b/jjb/xci/osa-periodic-jobs.yml
@@ -0,0 +1,149 @@
+- project:
+ project: 'releng'
+
+ name: 'os-periodic'
+#--------------------------------
+# Branch Anchors
+#--------------------------------
+# the versions stated here default to branches, which are later
+# used for checking out the branches, pulling in the head of the branch.
+ master: &master
+ stream: master
+ openstack-osa-version: '{stream}'
+ opnfv-releng-version: 'master'
+ gs-pathname: ''
+ ocata: &ocata
+ stream: ocata
+ openstack-osa-version: 'stable/{stream}'
+ opnfv-releng-version: 'master'
+ gs-pathname: '/{stream}'
+#--------------------------------
+# XCI PODs
+#--------------------------------
+ pod:
+ - virtual:
+ <<: *master
+ - virtual:
+ <<: *ocata
+#--------------------------------
+# Supported Distros
+#--------------------------------
+ distro:
+ - 'xenial':
+ disabled: false
+ slave-label: xci-xenial-virtual
+ dib-os-release: 'xenial'
+ dib-os-element: 'ubuntu-minimal'
+ dib-os-packages: 'vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables'
+ extra-dib-elements: 'openssh-server'
+ - 'centos7':
+ disabled: true
+ slave-label: xci-centos7-virtual
+ dib-os-release: '7'
+ dib-os-element: 'centos7'
+ dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
+ extra-dib-elements: 'openssh-server'
+ - 'suse':
+ disabled: true
+ slave-label: xci-suse-virtual
+ dib-os-release: '42.2'
+ dib-os-element: 'opensuse-minimal'
+ dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
+ extra-dib-elements: 'openssh-server'
+
+#--------------------------------
+# jobs
+#--------------------------------
+ jobs:
+ - 'osa-deploy-{pod}-{distro}-periodic-{stream}'
+
+#--------------------------------
+# job templates
+#--------------------------------
+- job-template:
+ name: 'osa-deploy-{pod}-{distro}-periodic-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: false
+
+ properties:
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - '^xci-os.*'
+ - '^xci-deploy.*'
+ - '^xci-functest.*'
+ - '^bifrost-.*periodic.*'
+ - '^osa-.*periodic.*'
+ block-level: 'NODE'
+ - logrotate-default
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{opnfv-releng-version}'
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ - string:
+ name: XCI_FLAVOR
+ default: 'ha'
+ - string:
+ name: OPENSTACK_OSA_VERSION
+ default: '{openstack-osa-version}'
+ - string:
+ name: OPNFV_RELENG_VERSION
+ default: '{opnfv-releng-version}'
+ - string:
+ name: DISTRO
+ default: '{distro}'
+ - string:
+ name: DIB_OS_RELEASE
+ default: '{dib-os-release}'
+ - string:
+ name: DIB_OS_ELEMENT
+ default: '{dib-os-element}'
+ - string:
+ name: DIB_OS_PACKAGES
+ default: '{dib-os-packages}'
+ - string:
+ name: EXTRA_DIB_ELEMENTS
+ default: '{extra-dib-elements}'
+ - string:
+ name: CLEAN_DIB_IMAGES
+ default: 'true'
+ - label:
+ name: SLAVE_LABEL
+ default: '{slave-label}'
+ - string:
+ name: ANSIBLE_VERBOSITY
+ default: ''
+ - string:
+ name: XCI_LOOP
+ default: 'periodic'
+
+ wrappers:
+ - fix-workspace-permissions
+
+ scm:
+ - git-scm
+
+ # trigger is disabled until we know which jobs we will have
+ # and adjust stuff accordingly
+ triggers:
+ - timed: '#@midnight'
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME - Scenario: $DEPLOY_SCENARIO"
+ - 'osa-deploy-builder'
+
+#---------------------------
+# builder macros
+#---------------------------
+- builder:
+ name: osa-deploy-builder
+ builders:
+ - shell:
+ !include-raw: ./xci-deploy.sh
diff --git a/jjb/xci/xci-daily-jobs.yml b/jjb/xci/xci-daily-jobs.yml
index dbe3b654b..64e13d3eb 100644
--- a/jjb/xci/xci-daily-jobs.yml
+++ b/jjb/xci/xci-daily-jobs.yml
@@ -1,34 +1,37 @@
+#--------------------------------
+# These jobs run on a daily basis and deploy OpenStack
+# using the pinned versions of opnfv/releng, openstack/bifrost
+# and openstack/openstack-ansible. Due to this, no version/branch
+# is set/passed to the jobs; instead, the versions are checked out
+# based on what is configured.
+#--------------------------------
- project:
- name: 'bifrost-osa-daily'
+ project: 'releng'
+
+ name: 'xci-daily'
#--------------------------------
-# BRANCH ANCHORS
+# Branch Anchors
#--------------------------------
-# the versions stated here default to branches which then later
-# on used for checking out the branches, pulling in head of the branch.
-# but they also allow us to state sha1 so instead of checking out the
-# branches, we can check out sha1 if we want to use locked/specific
-# sha1 or manually enter sha1.
master: &master
stream: master
- openstack-osa-version: '{stream}'
- openstack-bifrost-version: '{stream}'
- opnfv-releng-version: 'master'
+ opnfv-releng-version: master
gs-pathname: ''
ocata: &ocata
stream: ocata
- openstack-osa-version: 'stable/{stream}'
- openstack-bifrost-version: 'stable/{stream}'
- opnfv-releng-version: 'master'
+ opnfv-releng-version: master
gs-pathname: '/{stream}'
#--------------------------------
-# scenarios
+# Scenarios
#--------------------------------
scenario:
- # HA scenarios
- 'os-nosdn-nofeature-ha':
auto-trigger-name: 'daily-trigger-disabled'
+ xci-flavor: 'ha'
+ - 'os-nosdn-nofeature-noha':
+ auto-trigger-name: 'daily-trigger-disabled'
+ xci-flavor: 'noha'
#--------------------------------
-# XCI PODs
+# XCI PODs
#--------------------------------
pod:
- virtual:
@@ -36,7 +39,7 @@
- virtual:
<<: *ocata
#--------------------------------
-# Supported Distros
+# Supported Distros
#--------------------------------
distro:
- 'xenial':
@@ -44,7 +47,7 @@
slave-label: xci-xenial-virtual
dib-os-release: 'xenial'
dib-os-element: 'ubuntu-minimal'
- dib-os-packages: 'vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony'
+      dib-os-packages: 'vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables'
extra-dib-elements: 'openssh-server'
- 'centos7':
disabled: true
@@ -65,7 +68,6 @@
# Phases
#--------------------------------
phase:
- - 'provision'
- 'deploy'
- 'functest'
#--------------------------------
@@ -76,19 +78,6 @@
- 'xci-{phase}-{pod}-{distro}-daily-{stream}'
#--------------------------------
-# VM defaults
-#--------------------------------
-- defaults:
- name: daily_vm_defaults
- test-vm-num-nodes: '6'
- test-vm-node-names: 'xcimaster controller00 controller01 controller02 compute00 compute01'
- vm-domain-type: 'kvm'
- vm-cpu: '8'
- vm-disk: '100'
- vm-memory-size: '16384'
- vm-disk-cache: 'unsafe'
-
-#--------------------------------
# job templates
#--------------------------------
- job-template:
@@ -103,69 +92,52 @@
use-build-blocker: true
blocking-jobs:
- '^xci-os.*'
+ - '^xci-deploy.*'
+ - '^xci-functest.*'
+ - '^bifrost-.*periodic.*'
+ - '^osa-.*periodic.*'
block-level: 'NODE'
- logrotate-default
parameters:
- string:
- name: OPENSTACK_OSA_VERSION
- default: '{openstack-osa-version}'
- - string:
- name: OPENSTACK_BIFROST_VERSION
- default: '{openstack-osa-version}'
- - string:
- name: OPNFV_RELENG_VERSION
- default: '{opnfv-releng-version}'
- - string:
- name: USE_PROMOTED_VERSIONS
- default: 'true'
- - string:
name: DEPLOY_SCENARIO
default: '{scenario}'
+ - string:
+ name: XCI_FLAVOR
+ default: '{xci-flavor}'
- label:
name: SLAVE_LABEL
default: '{slave-label}'
+ - string:
+ name: XCI_LOOP
+ default: 'daily'
triggers:
- '{auto-trigger-name}'
wrappers:
- - xci-fix-perms-workspace
+ - fix-workspace-permissions
builders:
- description-setter:
description: "Built on $NODE_NAME"
- trigger-builds:
- - project: 'xci-provision-{pod}-{distro}-daily-{stream}'
- current-parameters: false
- predefined-parameters: |
- OPENSTACK_OSA_VERSION=$OPENSTACK_OSA_VERSION
- OPENSTACK_BIFROST_VERSION=$OPENSTACK_BIFROST_VERSION
- OPNFV_RELENG_VERSION=$OPNFV_RELENG_VERSION
- USE_PROMOTED_VERSIONS=$USE_PROMOTED_VERSIONS
- DEPLOY_SCENARIO=$DEPLOY_SCENARIO
- same-node: true
- block: true
- - trigger-builds:
- project: 'xci-deploy-{pod}-{distro}-daily-{stream}'
current-parameters: false
predefined-parameters: |
- OPENSTACK_OSA_VERSION=$OPENSTACK_OSA_VERSION
- OPENSTACK_BIFROST_VERSION=$OPENSTACK_BIFROST_VERSION
- OPNFV_RELENG_VERSION=$OPNFV_RELENG_VERSION
- USE_PROMOTED_VERSIONS=$USE_PROMOTED_VERSIONS
DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+ XCI_FLAVOR=$XCI_FLAVOR
+ XCI_LOOP=$XCI_LOOP
same-node: true
block: true
- trigger-builds:
- project: 'xci-functest-{pod}-{distro}-daily-{stream}'
current-parameters: false
predefined-parameters: |
- OPENSTACK_OSA_VERSION=$OPENSTACK_OSA_VERSION
- OPENSTACK_BIFROST_VERSION=$OPENSTACK_BIFROST_VERSION
- OPNFV_RELENG_VERSION=$OPNFV_RELENG_VERSION
- USE_PROMOTED_VERSIONS=$USE_PROMOTED_VERSIONS
DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+ XCI_FLAVOR=$XCI_FLAVOR
+ XCI_LOOP=$XCI_LOOP
same-node: true
block: true
block-thresholds:
@@ -182,37 +154,39 @@
disabled: '{obj:disabled}'
- defaults: daily_vm_defaults
-
concurrent: false
properties:
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - '^xci-provision.*'
- '^xci-deploy.*'
- '^xci-functest.*'
+ - '^bifrost-.*periodic.*'
+ - '^osa-.*periodic.*'
block-level: 'NODE'
- logrotate-default
+ wrappers:
+ - fix-workspace-permissions
+
+ scm:
+ - git-scm
+
parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{opnfv-releng-version}'
- string:
- name: OPENSTACK_OSA_VERSION
- default: '{openstack-osa-version}'
- - string:
- name: OPENSTACK_BIFROST_VERSION
- default: '{openstack-osa-version}'
- - string:
- name: OPNFV_RELENG_VERSION
- default: '{opnfv-releng-version}'
- - string:
- name: USE_PROMOTED_VERSIONS
- default: 'true'
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
- string:
name: DEPLOY_SCENARIO
default: 'os-nosdn-nofeature-ha'
- string:
+ name: XCI_FLAVOR
+ default: 'ha'
+ - string:
name: DISTRO
default: '{distro}'
- string:
@@ -222,32 +196,11 @@
name: DIB_OS_ELEMENT
default: '{dib-os-element}'
- string:
- name: EXTRA_DIB_ELEMENTS
- default: '{extra-dib-elements}'
- - string:
name: DIB_OS_PACKAGES
default: '{dib-os-packages}'
- string:
- name: TEST_VM_NUM_NODES
- default: '{test-vm-num-nodes}'
- - string:
- name: TEST_VM_NODE_NAMES
- default: '{test-vm-node-names}'
- - string:
- name: VM_DOMAIN_TYPE
- default: '{vm-domain-type}'
- - string:
- name: VM_CPU
- default: '{vm-cpu}'
- - string:
- name: VM_DISK
- default: '{vm-disk}'
- - string:
- name: VM_MEMORY_SIZE
- default: '{vm-memory-size}'
- - string:
- name: VM_DISK_CACHE
- default: '{vm-disk-cache}'
+ name: EXTRA_DIB_ELEMENTS
+ default: '{extra-dib-elements}'
- string:
name: CLEAN_DIB_IMAGES
default: 'true'
@@ -257,9 +210,9 @@
- string:
name: ANSIBLE_VERBOSITY
default: ''
-
- wrappers:
- - xci-fix-perms-workspace
+ - string:
+ name: XCI_LOOP
+ default: 'daily'
builders:
- description-setter:
@@ -267,29 +220,14 @@
- 'xci-{phase}-builder'
#---------------------------
-# wrapper macros
-#---------------------------
-- wrapper:
- name: xci-fix-perms-workspace
- wrappers:
- - pre-scm-buildstep:
- - shell: |
- #!/bin/bash
- sudo chown -R $USER $WORKSPACE || exit 1
-
-#---------------------------
# builder macros
#---------------------------
- builder:
- name: xci-provision-builder
- builders:
- - shell:
- !include-raw: ./xci-provision.sh
-- builder:
name: xci-deploy-builder
builders:
- shell:
!include-raw: ./xci-deploy.sh
+
- builder:
name: xci-functest-builder
builders:
diff --git a/jjb/xci/xci-deploy.sh b/jjb/xci/xci-deploy.sh
index 87f9ec8db..b007b852f 100755
--- a/jjb/xci/xci-deploy.sh
+++ b/jjb/xci/xci-deploy.sh
@@ -11,83 +11,65 @@ set -o errexit
set -o nounset
set -o pipefail
-trap cleanup_and_upload EXIT
+cd $WORKSPACE/prototypes/xci
-function fix_ownership() {
- if [ -z "${JOB_URL+x}" ]; then
- echo "Not running as part of Jenkins. Handle the logs manually."
- else
- # Make sure cache exists
- [[ ! -d ${HOME}/.cache ]] && mkdir ${HOME}/.cache
+# for daily jobs, we want to use working versions
+# for periodic jobs, we will use whatever is set in the job, probably master
+if [[ "$XCI_LOOP" == "daily" ]]; then
+ # source pinned-vars to get releng version
+ source ./config/pinned-versions
- sudo chown -R jenkins:jenkins $WORKSPACE
- sudo chown -R jenkins:jenkins ${HOME}/.cache
- fi
-}
-
-function cleanup_and_upload() {
- original_exit=$?
- fix_ownership
- exit $original_exit
-}
-
-# check distro to see if we support it
-if [[ ! "$DISTRO" =~ (xenial|centos7|suse) ]]; then
- echo "Distro $DISTRO is not supported!"
- exit 1
+ # checkout the version
+ git checkout -q $OPNFV_RELENG_VERSION
+ echo "Info: Using $OPNFV_RELENG_VERSION"
+elif [[ "$XCI_LOOP" == "periodic" ]]; then
+ echo "Info: Using $OPNFV_RELENG_VERSION"
fi
-# remove previously cloned repos
-sudo /bin/rm -rf /opt/openstack-ansible /opt/stack /opt/releng /opt/functest
-
-# Fix up permissions
-fix_ownership
-
-# openstack-ansible enables strict host key checking by default
-export ANSIBLE_HOST_KEY_CHECKING=False
-
-# ensure the versions to checkout are set
-export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-master}
-export OPNFV_RELENG_VERSION=${OPNFV_RELENG_VERSION:-master}
+# this is just an example to give an idea of what we need to do
+# so ignore this part for the time being as we need to adjust xci-deploy.sh
+# to take this into account while deploying anyway
+# clone openstack-ansible
+# stable/ocata already use pinned versions so this is only valid for master
+if [[ "$XCI_LOOP" == "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
+ cd $WORKSPACE
+ # get the url to openstack-ansible git
+ source ./config/env-vars
+ echo "Info: Capture the ansible role requirement versions before doing anything"
+ git clone -q $OPENSTACK_OSA_GIT_URL
+ cd openstack-ansible
+ cat ansible-role-requirements.yml | while IFS= read -r line
+ do
+ if [[ $line =~ "src:" ]]; then
+ repo_url=$(echo $line | awk {'print $2'})
+ repo_sha1=$(git ls-remote $repo_url $OPENSTACK_OSA_VERSION | awk {'print $1'})
+ fi
+ echo "$line" | sed -e "s|master|$repo_sha1|" >> opnfv-ansible-role-requirements.yml
+ done
+ echo "Info: SHA1s of ansible role requirements"
+ echo "-------------------------------------------------------------------------"
+ cat opnfv-ansible-role-requirements.yml
+ echo "-------------------------------------------------------------------------"
+fi
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "* *"
-echo "* Deploy OpenStack *"
-echo "* *"
-echo " openstack-ansible version: $OPENSTACK_OSA_VERSION"
-echo " releng version: $OPNFV_RELENG_VERSION"
-echo "* *"
-echo "***********************************************************************"
-echo -e "\n"
-# clone releng repo
-sudo git clone --quiet https://gerrit.opnfv.org/gerrit/releng /opt/releng
-cd /opt/releng && sudo git checkout --quiet $OPNFV_RELENG_VERSION
-echo "xci: using openstack-ansible commit"
-git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>'
+# proceed with the deployment
+cd $WORKSPACE/prototypes/xci
+sudo -E ./xci-deploy.sh
-# display the nodes
-echo "xci: OpenStack nodes"
-cd /opt/bifrost
-source env-vars
-ironic node-list
+if [[ "$JOB_NAME" =~ "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
+ # if we arrived here without failing, it means we have something we can pin
+ # this is again here to show the intention
+ cd $WORKSPACE/openstack-ansible
+ OSA_GIT_SHA1=$(git rev-parse HEAD)
-# this script will be reused for promoting openstack-ansible versions and using
-# promoted openstack-ansible versions as part of xci daily.
-USE_PROMOTED_VERSIONS=${USE_PROMOTED_VERSIONS:-false}
-if [ $USE_PROMOTED_VERSIONS = "true" ]; then
- echo "TBD: Will use the promoted versions of openstack/opnfv projects"
+ # log some info
+ echo -e "\n"
+ echo "***********************************************************************"
+ echo "* OSA SHA1 TO PIN *"
+ echo "* *"
+ echo " $OSA_GIT_SHA1"
+ echo "* *"
+ echo "***********************************************************************"
fi
-cd /opt/releng/prototypes/openstack-ansible/scripts
-sudo -E ./osa-deploy.sh
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "* *"
-echo "* OpenStack deployment is completed! *"
-echo "* *"
-echo "***********************************************************************"
echo -e "\n"
diff --git a/jjb/yardstick/yardstick-ci-jobs.yml b/jjb/yardstick/yardstick-ci-jobs.yml
index 1f2f3122c..5ff36f842 100644
--- a/jjb/yardstick/yardstick-ci-jobs.yml
+++ b/jjb/yardstick/yardstick-ci-jobs.yml
@@ -182,6 +182,16 @@
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *danube
+ - arm-virtual1:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - arm-virtual1:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *danube
- orange-pod2:
slave-label: '{pod}'
installer: joid
@@ -338,6 +348,13 @@
default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
+ name: 'yardstick-params-arm-virtual1'
+ parameters:
+ - string:
+ name: YARDSTICK_DB_BACKEND
+ default: '-i 104.197.68.199:8086'
+ description: 'Arguments to use in order to choose the backend DB'
+- parameter:
name: 'yardstick-params-joid-baremetal'
parameters:
- string:
diff --git a/jjb/yardstick/yardstick-daily.sh b/jjb/yardstick/yardstick-daily.sh
index f769e9cdd..973f83ad5 100755
--- a/jjb/yardstick/yardstick-daily.sh
+++ b/jjb/yardstick/yardstick-daily.sh
@@ -18,7 +18,7 @@ if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
# If production lab then creds may be retrieved dynamically
# creds are on the jumphost, always in the same folder
- labconfig="-v $LAB_CONFIG/admin-openrc:/home/opnfv/openrc"
+ labconfig="-v $LAB_CONFIG/admin-openrc:/etc/yardstick/openstack.creds"
# If dev lab, credentials may not be the default ones, just provide a path to put them into docker
# replace the default one by the customized one provided by jenkins config
fi
diff --git a/prototypes/xci/file/aio/openstack_user_config.yml b/modules/opnfv/deployment/compass/__init__.py
index e69de29bb..e69de29bb 100644
--- a/prototypes/xci/file/aio/openstack_user_config.yml
+++ b/modules/opnfv/deployment/compass/__init__.py
diff --git a/modules/opnfv/deployment/compass/adapter.py b/modules/opnfv/deployment/compass/adapter.py
new file mode 100644
index 000000000..38aa45227
--- /dev/null
+++ b/modules/opnfv/deployment/compass/adapter.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import json
+import netaddr
+import re
+
+from opnfv.deployment import manager
+from opnfv.utils import opnfv_logger as logger
+from opnfv.utils import ssh_utils
+
+logger = logger.Logger(__name__).getLogger()
+
+
+class CompassAdapter(manager.DeploymentHandler):
+
+ def __init__(self, installer_ip, installer_user, installer_pwd):
+ super(CompassAdapter, self).__init__(installer='compass',
+ installer_ip=installer_ip,
+ installer_user=installer_user,
+ installer_pwd=installer_pwd,
+ pkey_file=None)
+
+ def get_nodes(self, options=None):
+ nodes = []
+ self.deployment_status = None
+ self.nodes_dict = self._get_deployment_nodes()
+ self.deployment_status = self.get_deployment_status()
+
+ for k, v in self.nodes_dict.iteritems():
+ node = manager.Node(v['id'], v['ip'],
+ k, v['status'],
+ v['roles'], v['ssh_client'], v['mac'])
+ nodes.append(node)
+
+ self.get_nodes_called = True
+ return nodes
+
+ def get_openstack_version(self):
+ version = None
+ cmd = 'source /opt/admin-openrc.sh;nova-manage version 2>/dev/null'
+ version = next(node.run_cmd(cmd) for node in self.nodes
+ if node.is_controller())
+ return version
+
+ def get_sdn_version(self):
+ for node in self.nodes:
+ if node.is_odl():
+ sdn_info = self._get_sdn_info(node, manager.Role.ODL)
+ break
+ elif node.is_onos():
+ sdn_info = self._get_sdn_info(node, manager.Role.ONOS)
+ break
+ else:
+ sdn_info = None
+ return sdn_info
+
+ def _get_sdn_info(self, node, sdn_type):
+ if sdn_type == manager.Role.ODL:
+ sdn_key = 'distribution-karaf'
+ elif sdn_type == manager.Role.ONOS:
+ sdn_key = 'onos-'
+ else:
+ raise KeyError('SDN %s is not supported', sdn_type)
+            raise KeyError('SDN {0} is not supported'.format(sdn_type))
+ cmd = "find /opt -name '{0}*'".format(sdn_key)
+ sdn_info = node.run_cmd(cmd)
+ sdn_version = 'None'
+ if sdn_info:
+ # /opt/distribution-karaf-0.5.2-Boron-SR2.tar.gz
+ match_sdn = re.findall(r".*(0\.\d\.\d).*", sdn_info)
+ if (match_sdn and len(match_sdn) >= 1):
+ sdn_version = match_sdn[0]
+ sdn_version = '{0} {1}'.format(sdn_type, sdn_version)
+ return sdn_version
+
+ def get_deployment_status(self):
+ if self.deployment_status is not None:
+ logger.debug('Skip - Node status has been retrieved once')
+ return self.deployment_status
+
+ for k, v in self.nodes_dict.iteritems():
+ if manager.Role.CONTROLLER in v['roles']:
+ cmd = 'source /opt/admin-openrc.sh; nova hypervisor-list;'
+                '''
+                +----+---------------------+-------+---------+
+                | ID | Hypervisor hostname | State | Status  |
+                +----+---------------------+-------+---------+
+                | 3  | host4               | up    | enabled |
+                | 6  | host5               | up    | enabled |
+                +----+---------------------+-------+---------+
+                '''
+ _, stdout, stderr = (v['ssh_client'].exec_command(cmd))
+ error = stderr.readlines()
+ if len(error) > 0:
+ logger.error("error %s" % ''.join(error))
+ status = manager.NodeStatus.STATUS_ERROR
+ v['status'] = status
+ continue
+
+ lines = stdout.readlines()
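+                # skip the three header lines and the trailing border of the
+                # hypervisor-list table shown in the docstring above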
+ for i in range(3, len(lines) - 1):
+ fields = lines[i].strip().encode().rsplit(' | ')
+ hostname = fields[1].strip().encode().lower()
+ state = fields[2].strip().encode().lower()
+ if 'up' == state:
+ status = manager.NodeStatus.STATUS_OK
+ else:
+ status = manager.NodeStatus.STATUS_ERROR
+ self.nodes_dict[hostname]['status'] = status
+ v['status'] = manager.NodeStatus.STATUS_OK
+
+ failed_nodes = [k for k, v in self.nodes_dict.iteritems()
+ if v['status'] != manager.NodeStatus.STATUS_OK]
+
+ if failed_nodes and len(failed_nodes) > 0:
+ return 'Hosts {0} failed'.format(','.join(failed_nodes))
+
+ return 'active'
+
+ def _get_deployment_nodes(self):
+ sql_query = ('select host.host_id, host.roles, '
+ 'network.ip_int, machine.mac from clusterhost as host, '
+ 'host_network as network, machine as machine '
+ 'where host.host_id=network.host_id '
+ 'and host.id=machine.id;')
+ cmd = 'mysql -uroot -Dcompass -e "{0}"'.format(sql_query)
+ logger.debug('mysql command: %s', cmd)
+ output = self.installer_node.run_cmd(cmd)
+ '''
+ host_id roles ip_int mac
+ 1 ["controller", "ha", "odl", "ceph-adm", "ceph-mon"]
+ 167837746 00:00:e3:ee:a8:63
+ 2 ["controller", "ha", "odl", "ceph-mon"]
+ 167837747 00:00:31:1d:16:7a
+ 3 ["controller", "ha", "odl", "ceph-mon"]
+ 167837748 00:00:0c:bf:eb:01
+ 4 ["compute", "ceph-osd"] 167837749 00:00:d8:22:6f:59
+ 5 ["compute", "ceph-osd"] 167837750 00:00:75:d5:6b:9e
+ '''
+ lines = output.encode().rsplit('\n')
+ nodes_dict = {}
+ if (not lines or len(lines) < 2):
+ logger.error('No nodes are found in the deployment.')
+ return nodes_dict
+
+ proxy = {'ip': self.installer_ip,
+ 'username': self.installer_user,
+ 'password': self.installer_pwd}
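+        # the first line of the mysql output is the column header, so start
+        # parsing the tab-separated node records from the second line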
+ for i in range(1, len(lines)):
+ fields = lines[i].strip().encode().rsplit('\t')
+ host_id = fields[0].strip().encode()
+ name = 'host{0}'.format(host_id)
+ node_roles_str = fields[1].strip().encode().lower()
+ node_roles_list = json.loads(node_roles_str)
+ node_roles = [manager.Role.ODL if x == 'odl'
+ else x for x in node_roles_list]
+ roles = [x for x in [manager.Role.CONTROLLER,
+ manager.Role.COMPUTE,
+ manager.Role.ODL,
+ manager.Role.ONOS] if x in node_roles]
+ ip = fields[2].strip().encode()
+ ip = str(netaddr.IPAddress(ip))
+ mac = fields[3].strip().encode()
+
+ nodes_dict[name] = {}
+ nodes_dict[name]['id'] = host_id
+ nodes_dict[name]['roles'] = roles
+ nodes_dict[name]['ip'] = ip
+ nodes_dict[name]['mac'] = mac
+ ssh_client = ssh_utils.get_ssh_client(hostname=ip,
+ username='root',
+ proxy=proxy)
+ nodes_dict[name]['ssh_client'] = ssh_client
+ nodes_dict[name]['status'] = manager.NodeStatus.STATUS_UNKNOWN
+ return nodes_dict
diff --git a/modules/opnfv/deployment/example.py b/modules/opnfv/deployment/example.py
index 3999a11c6..52d9b5630 100644
--- a/modules/opnfv/deployment/example.py
+++ b/modules/opnfv/deployment/example.py
@@ -34,3 +34,17 @@ print("List of nodes in cluster 4:")
nodes = handler.get_nodes({'cluster': '4'})
for node in nodes:
print(node)
+
+
+print("########## COMPASS ##########")
+handler = factory.Factory.get_handler('compass',
+ '192.168.200.2',
+ 'root',
+ installer_pwd='root')
+
+print(handler.get_deployment_status())
+print(handler.get_deployment_info())
+print('Details of each node:')
+nodes = handler.nodes
+for node in nodes:
+ print(node)
diff --git a/modules/opnfv/deployment/factory.py b/modules/opnfv/deployment/factory.py
index 1ccee4e80..b8e5c8ef4 100644
--- a/modules/opnfv/deployment/factory.py
+++ b/modules/opnfv/deployment/factory.py
@@ -9,6 +9,7 @@
from opnfv.deployment.apex import adapter as apex_adapter
+from opnfv.deployment.compass import adapter as compass_adapter
from opnfv.deployment.fuel import adapter as fuel_adapter
from opnfv.utils import opnfv_logger as logger
@@ -40,6 +41,11 @@ class Factory(object):
return fuel_adapter.FuelAdapter(installer_ip=installer_ip,
installer_user=installer_user,
installer_pwd=installer_pwd)
+ elif installer.lower() == "compass":
+ return compass_adapter.CompassAdapter(
+ installer_ip=installer_ip,
+ installer_user=installer_user,
+ installer_pwd=installer_pwd)
else:
raise Exception("Installer adapter is not implemented for "
"the given installer.")
diff --git a/modules/opnfv/deployment/manager.py b/modules/opnfv/deployment/manager.py
index df735f157..694df7755 100644
--- a/modules/opnfv/deployment/manager.py
+++ b/modules/opnfv/deployment/manager.py
@@ -108,6 +108,7 @@ class NodeStatus():
STATUS_OFFLINE = 'offline'
STATUS_ERROR = 'error'
STATUS_UNUSED = 'unused'
+ STATUS_UNKNOWN = 'unknown'
class Node(object):
@@ -229,6 +230,12 @@ class Node(object):
'''
return Role.ODL in self.roles
+ def is_onos(self):
+ '''
+ Returns if the node is an ONOS
+ '''
+ return Role.ONOS in self.roles
+
def get_ovs_info(self):
'''
Returns the ovs version installed
@@ -383,4 +390,4 @@ class DeploymentHandler(object):
pod=os.getenv('NODE_NAME', 'Unknown'),
openstack_version=self.get_openstack_version(),
sdn_controller=self.get_sdn_version(),
- nodes=self.get_nodes())
+ nodes=self.nodes)
diff --git a/modules/requirements.txt b/modules/requirements.txt
new file mode 100644
index 000000000..1eaf8d089
--- /dev/null
+++ b/modules/requirements.txt
@@ -0,0 +1,3 @@
+paramiko>=2.0.1
+mock==1.3.0
+requests==2.9.1
diff --git a/modules/test-requirements.txt b/modules/test-requirements.txt
new file mode 100644
index 000000000..99d7f1313
--- /dev/null
+++ b/modules/test-requirements.txt
@@ -0,0 +1,6 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+nose
+coverage
diff --git a/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic-inspector.conf.j2 b/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic-inspector.conf.j2
deleted file mode 100644
index dc4e3ffad..000000000
--- a/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic-inspector.conf.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-{#
-# Note(TheJulia): This file is based upon the file format provided by the git
-# committed example located at:
-# http://git.openstack.org/cgit/openstack/ironic-inspector/tree/example.conf
-#}
-[DEFAULT]
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-auth_strategy = keystone
-{% else %}
-auth_strategy = {{ inspector_auth | default('noauth') }}
-{% endif %}
-debug = {{ inspector_debug | bool }}
-
-[database]
-connection=mysql+pymysql://inspector:{{ ironic_db_password }}@localhost/inspector?charset=utf8
-min_pool_size = 1
-max_pool_size = 5
-
-[firewall]
-manage_firewall = {{ inspector_manage_firewall | bool | default('false') }}
-
-[ironic]
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-os_region = {{ keystone.bootstrap.region_name | default('RegionOne') }}
-project_name = baremetal
-username = {{ ironic_inspector.keystone.default_username }}
-password = {{ ironic_inspector.keystone.default_password }}
-auth_url = {{ ironic_inspector.service_catalog.auth_url }}
-auth_type = password
-auth_strategy = keystone
-user_domain_id = default
-project_domain_id = default
-
-{% else %}
-auth_strategy = {{ ironic_auth_strategy | default('noauth') }}
-{% endif %}
-
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-[keystone_authtoken]
-auth_plugin = password
-auth_url = {{ ironic_inspector.service_catalog.auth_url }}
-username = {{ ironic_inspector.service_catalog.username }}
-password = {{ ironic_inspector.service_catalog.password }}
-user_domain_id = default
-project_name = service
-project_domain_id = default
-
-{% endif %}
-{#
-# Note(TheJulia) preserving ironic_url in the configuration
-# in case future changes allow breaking of the deployment across
-# multiple nodes.
-#ironic_url = http://localhost:6385/
-#}
-
-[processing]
-add_ports = {{ inspector_port_addition | default('pxe') }}
-keep_ports = {{ inspector_keep_ports | default('present') }}
-ramdisk_logs_dir = {{ inspector_data_dir }}/log
-always_store_ramdisk_logs = {{ inspector_store_ramdisk_logs | default('true') | bool }}
-{% if inspector.discovery.enabled == true %}
-node_not_found_hook = enroll
-
-[discovery]
-enroll_node_driver = {{ inspector.discovery.default_node_driver }}
-{% endif %}
diff --git a/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic.conf.j2 b/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic.conf.j2
deleted file mode 100644
index d8896fa9e..000000000
--- a/prototypes/bifrost/playbooks/roles/bifrost-ironic-install/templates/ironic.conf.j2
+++ /dev/null
@@ -1,92 +0,0 @@
-# {{ ansible_managed }}
-# For additional details on configuring ironic, you may wish to reference
-# the sample configuration file which can be located at
-# http://git.openstack.org/cgit/openstack/ironic/tree/etc/ironic/ironic.conf.sample
-
-
-[DEFAULT]
-# NOTE(TheJulia): Until Bifrost supports neutron or some other network
-# configuration besides a flat network where bifrost orchustrates the
-# control instead of ironic, noop is the only available network driver.
-enabled_network_interfaces = noop
-{% if testing | bool == true %}
-enabled_drivers = agent_ipmitool,pxe_ipmitool
-debug = true
-{% else %}
-enabled_drivers = {{ enabled_drivers }}
-debug = false
-{% endif %}
-
-rabbit_userid = ironic
-rabbit_password = {{ ironic_db_password }}
-
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-auth_strategy = keystone
-{% else %}
-auth_strategy = noauth
-{% endif %}
-
-[pxe]
-pxe_append_params = systemd.journald.forward_to_console=yes {{ extra_kernel_options | default('') }}
-pxe_config_template = $pybasedir/drivers/modules/ipxe_config.template
-tftp_server = {{ hostvars[inventory_hostname]['ansible_' + ans_network_interface]['ipv4']['address'] }}
-tftp_root = /tftpboot
-pxe_bootfile_name = undionly.kpxe
-ipxe_enabled = true
-ipxe_boot_script = /etc/ironic/boot.ipxe
-
-[deploy]
-http_url = http://{{ hostvars[inventory_hostname]['ansible_' + ans_network_interface]['ipv4']['address'] }}:{{ file_url_port }}/
-http_root = {{ http_boot_folder }}
-
-[conductor]
-api_url = http://{{ hostvars[inventory_hostname]['ansible_' + ans_network_interface]['ipv4']['address'] }}:6385/
-clean_nodes = {{ cleaning | lower }}
-automated_clean = {{ cleaning | lower }}
-
-[database]
-connection = mysql+pymysql://ironic:{{ ironic_db_password }}@localhost/ironic?charset=utf8
-min_pool_size = 1
-max_pool_size = 5
-
-[dhcp]
-dhcp_provider = none
-
-{% if testing | bool == true %}
-[ssh]
-libvirt_uri = qemu:///system
-{% endif %}
-
-{% if enable_cors | bool == true %}
-[cors]
-allowed_origin = {{ cors_allowed_origin | default('allowed_origin=http://localhost:8000') }}
-allow_credentials = {{ enable_cors_credential_support | default('true') }}
-{% endif %}
-
-[ilo]
-use_web_server_for_images = true
-
-{% if enable_inspector | bool == true %}
-[inspector]
-enabled = true
-{% endif %}
-
-{% if enable_keystone is defined and enable_keystone | bool == true %}
-[keystone]
-region_name = {{ keystone.bootstrap.region_name | default('RegionOne')}}
-[keystone_authtoken]
-auth_plugin = password
-auth_url = {{ ironic.service_catalog.auth_url }}
-username = {{ ironic.service_catalog.username }}
-password = {{ ironic.service_catalog.password }}
-user_domain_id = default
-project_name = {{ ironic.service_catalog.project_name }}
-project_domain_id = default
-
-[service_catalog]
-auth_url = {{ ironic.service_catalog.auth_url }}
-auth_type = password
-tenant_name = {{ ironic.service_catalog.project_name }}
-username = {{ ironic.service_catalog.username }}
-password = {{ ironic.service_catalog.password }}
-{% endif %}
diff --git a/prototypes/bifrost/scripts/bifrost-provision.sh b/prototypes/bifrost/scripts/bifrost-provision.sh
index 2814808f0..d3b28ee10 100755
--- a/prototypes/bifrost/scripts/bifrost-provision.sh
+++ b/prototypes/bifrost/scripts/bifrost-provision.sh
@@ -34,7 +34,7 @@ export BIFROST_INVENTORY_SOURCE=$BAREMETAL_DATA_FILE
# Default settings for VMs
export TEST_VM_NUM_NODES=${TEST_VM_NUM_NODES:-3}
-export TEST_VM_NODE_NAMES=${TEST_VM_NODE_NAMES:-"xcimaster controller00 compute00"}
+export TEST_VM_NODE_NAMES=${TEST_VM_NODE_NAMES:-"opnfv controller00 compute00"}
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-4}
export VM_DISK=${VM_DISK:-100}
diff --git a/prototypes/openstack-ansible/playbooks/configure-targethosts.yml b/prototypes/openstack-ansible/playbooks/configure-targethosts.yml
index 1f4ad063e..538fe17ec 100644
--- a/prototypes/openstack-ansible/playbooks/configure-targethosts.yml
+++ b/prototypes/openstack-ansible/playbooks/configure-targethosts.yml
@@ -47,7 +47,7 @@
remote_user: root
tasks:
- name: make nfs dir
- file: "dest=/images mode=777 state=directory"
+ file: "dest=/images mode=0777 state=directory"
- name: configure sdrvice
shell: "echo 'nfs 2049/tcp' >> /etc/services && echo 'nfs 2049/udp' >> /etc/services"
- name: configure NFS
diff --git a/prototypes/xci/README.rst b/prototypes/xci/README.rst
new file mode 100644
index 000000000..8318cdb52
--- /dev/null
+++ b/prototypes/xci/README.rst
@@ -0,0 +1,217 @@
+###########################
+OPNFV XCI Developer Sandbox
+###########################
+
+The XCI Developer Sandbox is created by the OPNFV community for the OPNFV
+community in order to
+
+- provide means for OPNFV developers to work with OpenStack master branch,
+  significantly cutting the time it takes to develop new features and to test
+  them on OPNFV Infrastructure
+- enable OPNFV developers to identify bugs earlier, issue fixes faster, and
+ get feedback on a daily basis
+- establish mechanisms to run additional testing on OPNFV Infrastructure to
+ provide feedback to OpenStack community
+- make the solutions we put in place available to other LF Networking projects
+  that OPNFV works closely with
+
+More information about OPNFV XCI and the sandbox can be seen on
+`OPNFV Wiki <https://wiki.opnfv.org/pages/viewpage.action?pageId=8687635>`_.
+
+===================================
+Components of XCI Developer Sandbox
+===================================
+
+The sandbox uses OpenStack projects for VM node creation, provisioning
+and OpenStack installation.
+
+- **openstack/bifrost:** Bifrost (pronounced bye-frost) is a set of Ansible
+ playbooks that automates the task of deploying a base image onto a set
+ of known hardware using ironic. It provides modular utility for one-off
+ operating system deployment with as few operational requirements as
+ reasonably possible. Bifrost supports different operating systems such as
+ Ubuntu, CentOS, and openSUSE.
+ More information about this project can be seen on
+ `Bifrost documentation <https://docs.openstack.org/developer/bifrost/>`_.
+
+- **openstack/openstack-ansible:** OpenStack-Ansible is an official OpenStack
+ project which aims to deploy production environments from source in a way
+ that makes it scalable while also being simple to operate, upgrade, and grow.
+ More information about this project can be seen on
+ `OpenStack Ansible documentation <https://docs.openstack.org/developer/openstack-ansible/>`_.
+
+- **opnfv/releng:** OPNFV Releng Project provides additional scripts, Ansible
+  playbooks and configuration options so that developers have an easy
+  way of using openstack/bifrost and openstack/openstack-ansible by just
+  setting a couple of environment variables and executing a single script.
+  More information about this project can be seen on
+  `OPNFV Releng documentation <https://wiki.opnfv.org/display/releng>`_.
+
+==========
+Basic Flow
+==========
+
+Here are the steps that take place upon the execution of the sandbox script
+``xci-deploy.sh``:
+
+1. Sources environment variables in order to set things up properly.
+2. Installs ansible on the host where sandbox script is executed.
+3. Creates and provisions VM nodes based on the flavor chosen by the user.
+4. Configures the host where the sandbox script is executed.
+5. Configures the deployment host which the OpenStack installation will
+ be driven from.
+6. Configures the target hosts where OpenStack will be installed.
+7. Configures the target hosts as controller and compute nodes.
+8. Starts the OpenStack installation.
+
+=====================
+Sandbox Prerequisites
+=====================
+
+In order to use this sandbox, the host must have certain packages installed.
+
+- libvirt
+- python
+- pip
+- git
+- <fix the list with all the dependencies>
+- passwordless sudo
+
+The host must also have enough CPU/RAM/Disk in order to host the number of VM
+nodes that will be created based on the chosen flavor. See the details from
+`this link <https://wiki.opnfv.org/display/INF/XCI+Developer+Sandbox#XCIDeveloperSandbox-Prerequisites>`_.
+
+===========================
+Flavors Provided by Sandbox
+===========================
+
+OPNFV XCI Sandbox provides different flavors, ranging from all in one (aio),
+which puts much lower requirements on the host machine, to full-blown HA.
+
+* aio: Single node which acts as the deployment host, controller and compute.
+* mini: One deployment host, 1 controller node and 1 compute node.
+* noha: One deployment host, 1 controller node and 2 compute nodes.
+* ha: One deployment host, 3 controller nodes and 2 compute nodes.
+
+See the details of the flavors from
+`this link <https://wiki.opnfv.org/display/INF/XCI+Developer+Sandbox#XCIDeveloperSandbox-AvailableFlavors>`_.
+
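+For instance, to pick the mini flavor instead of the default aio used in
+Basic Usage below, export ``XCI_FLAVOR`` before executing the sandbox script
+(see Advanced Usage for the other variables that can be set this way)
+
+    export XCI_FLAVOR=mini
+    sudo -E ./xci-deploy.sh
+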
+==========
+How to Use
+==========
+
+Basic Usage
+-----------
+
+clone OPNFV Releng repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng.git
+
+change into the directory where the sandbox script is located
+
+ cd releng/prototypes/xci
+
+execute sandbox script
+
+ sudo -E ./xci-deploy.sh
+
+Issuing the above command will start the aio sandbox deployment and the
+sandbox should be ready in 1.5 to 2 hours, depending on the host machine.
+
+Advanced Usage
+--------------
+
+The flavor to deploy and the versions of the upstream components to use can
+be configured by developers by setting certain environment variables.
+The example below deploys the noha flavor using the latest openstack-ansible
+master branch and stores logs in a different location than the default.
+
+clone OPNFV Releng repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng.git
+
+change into the directory where the sandbox script is located
+
+ cd releng/prototypes/xci
+
+set the sandbox flavor
+
+ export XCI_FLAVOR=noha
+
+set the version to use for openstack-ansible
+
+ export OPENSTACK_OSA_VERSION=master
+
+set where the logs should be stored
+
+ export LOG_PATH=/home/jenkins/xcilogs
+
+execute sandbox script
+
+ sudo -E ./xci-deploy.sh
+
+Warning::
+
+    Please ensure you always execute the sandbox script using **sudo -E**
+    in order to make the environment variables you set available to the
+    sandbox script; otherwise you will end up with the default settings.
+
+===============
+User Variables
+===============
+
+All user variables can be set from the command line by exporting them before
+executing the script. The current user variables can be seen from
+``releng/prototypes/xci/config/user-vars``.
+
+The variables can also be set directly within the file before executing
+the sandbox script.
+
+===============
+Pinned Versions
+===============
+
+As explained above, the users can pick and choose which versions to use. If
+you want to be on the safe side, you can use the pinned versions the sandbox
+provides. They can be seen from ``releng/prototypes/xci/config/pinned-versions``.
+
+How Pinned Versions are Determined
+----------------------------------
+
+OPNFV runs periodic jobs against the upstream projects openstack/bifrost and
+openstack/openstack-ansible using the latest of the master and stable/ocata
+branches, continuously chasing the HEAD of the corresponding branches.
+
+Once a working version is identified, the versions of the upstream components
+are then bumped in the releng repo.
+
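+To give an idea of what a pin looks like, each upstream component ends up as
+an exported SHA1 in ``releng/prototypes/xci/config/pinned-versions``, for
+example
+
+    # HEAD of "master" as of 04.04.2017
+    export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"6109f824e5510e794dbf1968c3859e8b6356d280"}
+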
+===========================================
+Limitations, Known Issues, and Improvements
+===========================================
+
+The list can be seen using `this link <https://jira.opnfv.org/issues/?filter=11616>`_.
+
+=========
+Changelog
+=========
+
+Changelog can be seen using `this link <https://jira.opnfv.org/issues/?filter=11625>`_.
+
+=======
+Testing
+=======
+
+The sandbox is continuously tested by OPNFV CI to ensure changes do not impact
+users. In fact, OPNFV CI itself uses the sandbox scripts to run daily platform
+verification jobs.
+
+=======
+Support
+=======
+
+OPNFV XCI issues are tracked in the OPNFV JIRA Releng project. If you encounter
+an issue or identify a bug, please submit an issue to JIRA using
+`this link <https://jira.opnfv.org/projects/RELENG>`_.
+
+If you have questions or comments, you can ask them on the ``#opnfv-pharos``
+IRC channel on Freenode.
diff --git a/prototypes/xci/config/env-vars b/prototypes/xci/config/env-vars
index 1bb553b60..cefb412a6 100755
--- a/prototypes/xci/config/env-vars
+++ b/prototypes/xci/config/env-vars
@@ -9,9 +9,14 @@ export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy
export CLEAN_DIB_IMAGES=false
export OPNFV_HOST_IP=192.168.122.2
export XCI_FLAVOR_ANSIBLE_FILE_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR
+export CI_LOOP=${CI_LOOP:-daily}
export JOB_NAME=${JOB_NAME:-false}
+# TODO: this currently matches to bifrost ansible version
+# there is perhaps better way to do this
+export XCI_ANSIBLE_PIP_VERSION=2.1.5.0
export ANSIBLE_HOST_KEY_CHECKING=False
export DISTRO=${DISTRO:-ubuntu}
export DIB_OS_RELEASE=${DIB_OS_RELEASE:-xenial}
export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables"}
+export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
diff --git a/prototypes/xci/config/pinned-versions b/prototypes/xci/config/pinned-versions
index 1cd33813c..e3b49c7d4 100755
--- a/prototypes/xci/config/pinned-versions
+++ b/prototypes/xci/config/pinned-versions
@@ -21,7 +21,7 @@
#-------------------------------------------------------------------------------
# use releng from master until the development work with the sandbox is complete
export OPNFV_RELENG_VERSION="master"
-# HEAD of "master" as of 28.03.2017
-export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"2600d546ed7116f5aad81972b0987a269f3c45b4"}
-# HEAD of "master" as of 26.03.2017
-export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"baba7b317a5898cd73b4a11c4ce364c7e2d3d77f"}
+# HEAD of "master" as of 04.04.2017
+export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"6109f824e5510e794dbf1968c3859e8b6356d280"}
+# HEAD of "master" as of 04.04.2017
+export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"d9e1330c7ff9d72a604b6b4f3af765f66a01b30e"}
diff --git a/prototypes/xci/docs/developer-guide.rst b/prototypes/xci/docs/developer-guide.rst
new file mode 100644
index 000000000..9a07b1267
--- /dev/null
+++ b/prototypes/xci/docs/developer-guide.rst
@@ -0,0 +1,31 @@
+#########################
+OPNFV XCI Developer Guide
+#########################
+
+This document will contain details about the XCI and how things are put
+together in order to support different flavors and different distros in the
+future.
+
+This document is for anyone who will
+
+- do hands-on development with XCI, such as new features to XCI itself or
+  bugfixes
+- integrate new features
+- want to know what is going on behind the scenes
+
+It will also have guidance regarding how to develop for the sandbox.
+
+If you are looking for the User's Guide, please check README.rst in the root
+of the xci folder or take a look at the
+`Wiki <https://wiki.opnfv.org/display/INF/OpenStack>`_.
+
+===================================
+Components of XCI Developer Sandbox
+===================================
+
+TBD
+
+=============
+Detailed Flow
+=============
+
+TBD
diff --git a/prototypes/xci/file/aio/configure-opnfvhost.yml b/prototypes/xci/file/aio/configure-opnfvhost.yml
new file mode 100644
index 000000000..5c66d40c7
--- /dev/null
+++ b/prototypes/xci/file/aio/configure-opnfvhost.yml
@@ -0,0 +1,22 @@
+---
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - ../var/opnfv.yml
+ roles:
+ - role: remove-folders
+ - { role: clone-repository, project: "openstack/openstack-ansible", repo: "{{ OPENSTACK_OSA_GIT_URL }}", dest: "{{ OPENSTACK_OSA_PATH }}", version: "{{ OPENSTACK_OSA_VERSION }}" }
+ tasks:
+ - name: bootstrap ansible on opnfv host
+ command: "/bin/bash ./scripts/bootstrap-ansible.sh"
+ args:
+ chdir: "{{OPENSTACK_OSA_PATH}}"
+ - name: bootstrap opnfv host as aio
+ command: "/bin/bash ./scripts/bootstrap-aio.sh"
+ args:
+ chdir: "{{OPENSTACK_OSA_PATH}}"
+ - name: install OpenStack on opnfv host - this command doesn't log anything to console
+ command: "/bin/bash ./scripts/run-playbooks.sh"
+ args:
+ chdir: "{{OPENSTACK_OSA_PATH}}"
diff --git a/prototypes/xci/file/aio/flavor-vars.yml b/prototypes/xci/file/aio/flavor-vars.yml
index e69de29bb..6ac1e0fe9 100644
--- a/prototypes/xci/file/aio/flavor-vars.yml
+++ b/prototypes/xci/file/aio/flavor-vars.yml
@@ -0,0 +1,3 @@
+---
+# this file is added intentionally in order to simplify putting files in place
+# in future, it might contain vars specific to this flavor
diff --git a/prototypes/xci/file/aio/inventory b/prototypes/xci/file/aio/inventory
index e69de29bb..9a3dd9ee3 100644
--- a/prototypes/xci/file/aio/inventory
+++ b/prototypes/xci/file/aio/inventory
@@ -0,0 +1,2 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
diff --git a/prototypes/xci/file/ansible-role-requirements.yml b/prototypes/xci/file/ansible-role-requirements.yml
index 4faab1950..842bcc44c 100644
--- a/prototypes/xci/file/ansible-role-requirements.yml
+++ b/prototypes/xci/file/ansible-role-requirements.yml
@@ -7,199 +7,193 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+# these versions are extracted based on the osa commit d9e1330c7ff9d72a604b6b4f3af765f66a01b30e on 04.04.2017
+# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=d9e1330c7ff9d72a604b6b4f3af765f66a01b30e
- name: apt_package_pinning
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: master
+ version: 364fc9fcd8ff652546c13d9c20ac808bc0e35f66
- name: pip_install
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: master
+ version: 793ae4d01397bd91ebe18e9670e8e27d1ae91960
- name: galera_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: master
+ version: c093c13e01826da545bf9a0259e0be441bc1b5e1
- name: galera_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: master
+ version: fd0a6b104a32badbe7e7594e2c829261a53bfb11
- name: ceph_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: master
+ version: 9149bfa8e3c4284b656834ba7765ea3aa48bec2e
- name: haproxy_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: master
+ version: 32415ab81c61083ac5a83b65274703e4a5470e5e
- name: keepalived
scm: git
src: https://github.com/evrardjp/ansible-keepalived
- version: master
+ version: 4f7c8eb16e3cbd8c8748f126c1eea73db5c8efe9
- name: lxc_container_create
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: master
+ version: 097da38126d90cfca36cdc3955aaf658a00db599
- name: lxc_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: master
+ version: 2931d0c87a1c592ad7f1f2f83cdcf468e8dea932
- name: memcached_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: master
+ version: 58e17aa13ebe7b0aa5da7c00afc75d6716d2720d
- name: openstack-ansible-security
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-security
- version: master
+ version: 9d745ec4fe8ac3e6d6cbb2412abe5196a9d2dad7
- name: openstack_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: master
+ version: 2076dfddf418b1bdd64d3782346823902aa996bc
- name: os_keystone
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: master
+ version: cee7a02143a1826479e6444c6fb5f1c2b6074ab7
- name: openstack_openrc
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: master
+ version: fb98ad8d7bfe7fba0c964cb061313f1b8767c4b0
- name: os_aodh
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: master
+ version: 9dcacb8fd6feef02e485f99c83535707ae67876b
- name: os_barbican
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: master
+ version: bb3f39cb2f3c31c6980aa65c8953ff6293b992c0
- name: os_ceilometer
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
- version: master
+ version: 178ad8245fa019f0610c628c58c377997b011e8a
- name: os_cinder
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: master
+ version: 1321fd39d8f55d1dc3baf91b4194469b349d7dc4
- name: os_glance
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: master
+ version: f39ef212bfa2edff8334bfb632cc463001c77c11
- name: os_gnocchi
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
- version: master
+ version: 318bd76e5e72402e8ff5b372b469c27a9395341b
- name: os_heat
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: master
+ version: 07d59ddb757b2d2557fba52ac537803e646e65b4
- name: os_horizon
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: master
+ version: 69ef49c4f7a42f082f4bcff824d13f57145e2b83
- name: os_ironic
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: master
+ version: 57e8a0eaaa2159f33e64a1b037180383196919d1
- name: os_magnum
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
- version: master
+ version: 8329c257dff25686827bd1cc904506d76ad1d12f
- name: os_trove
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_trove
- version: master
+ version: b948402c76d6188caa7be376098354cdb850d638
- name: os_neutron
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_neutron
- version: master
+ version: 2a92a4e1857e7457683aefd87ee5a4e751fc701a
- name: os_nova
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: master
+ version: 511963b7921ec7c2db24e8ee1d71a940b0aafae4
- name: os_rally
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: master
+ version: 96153c5b3285d11d00611a03135c9d8f267e0f52
- name: os_sahara
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: master
+ version: 012d3f3530f878e5143d58380f94d1f514baad04
- name: os_swift
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: master
+ version: d62d6a23ac0b01d0320dbcb6c710dfd5f3cecfdf
- name: os_tempest
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: master
+ version: 9d2bfb09d1ebbc9102329b0d42de33aa321e57b1
- name: plugins
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: master
+ version: 3d2e23bb7e1d6775789d7f65ce8a878a7ee1d3c7
- name: rabbitmq_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: master
+ version: 9b0ce64fe235705e237bc4b476ecc0ad602d67a8
- name: repo_build
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: master
+ version: fe3ae20f74a912925d5c78040984957a6d55f9de
- name: repo_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: master
+ version: 7ea0820e0941282cd5c5cc263e939ffbee54ba52
- name: rsyslog_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: master
+ version: 19615e47137eee46ee92c0308532fe1d2212333c
- name: rsyslog_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: master
+ version: efd7b21798da49802012e390a0ddf7cc38636eeb
- name: sshd
scm: git
src: https://github.com/willshersystems/ansible-sshd
- version: master
+ version: 426e11c4dffeca09fcc4d16103a91e5e65180040
- name: bird
scm: git
src: https://github.com/logan2211/ansible-bird
- version: master
+ version: 2c4d29560d3617abddf0e63e0c95536364dedd92
- name: etcd
scm: git
src: https://github.com/logan2211/ansible-etcd
- version: master
+ version: ef63b0c5fd352b61084fd5aca286ee7f3fea932b
- name: unbound
scm: git
src: https://github.com/logan2211/ansible-unbound
- version: master
+ version: 5329d03eb9c15373d648a801563087c576bbfcde
- name: resolvconf
scm: git
src: https://github.com/logan2211/ansible-resolvconf
- version: master
+ version: 3b2b7cf2e900b194829565b351bf32bb63954548
- name: os_designate
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: master
+ version: b7098a6bdea73c869f45a86e0cc78d21b032161e
- name: ceph.ceph-common
scm: git
src: https://github.com/ceph/ansible-ceph-common
- version: master
+ version: ef149767fa9565ec887f0bdb007ff752bd61e5d5
- name: ceph.ceph-docker-common
scm: git
src: https://github.com/ceph/ansible-ceph-docker-common
- version: master
+ version: ca86fd0ef6d24aa2c750a625acdcb8012c374aa0
- name: ceph-mon
scm: git
src: https://github.com/ceph/ansible-ceph-mon
- version: master
+ version: c5be4d6056dfe6a482ca3fcc483a6050cc8929a1
- name: ceph-osd
scm: git
src: https://github.com/ceph/ansible-ceph-osd
- version: master
-- name: os_octavia
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
- version: master
-- name: os_molteniron
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: master
+ version: 7bc5a61ceb96e487b7a9fe9643f6dafa6492f2b5
diff --git a/prototypes/xci/file/exports b/prototypes/xci/file/exports
deleted file mode 100644
index af64d618d..000000000
--- a/prototypes/xci/file/exports
+++ /dev/null
@@ -1,14 +0,0 @@
-# /etc/exports: the access control list for filesystems which may be exported
-# to NFS clients. See exports(5).
-#
-# Example for NFSv2 and NFSv3:
-# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
-#
-# Example for NFSv4:
-# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
-# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
-#
-# glance images are stored on compute host and made available to image hosts via nfs
-# see image_hosts section in openstack_user_config.yml for details
-/images *(rw,sync,no_subtree_check,no_root_squash)
-
diff --git a/prototypes/xci/file/ha/flavor-vars.yml b/prototypes/xci/file/ha/flavor-vars.yml
index e69de29bb..167502c95 100644
--- a/prototypes/xci/file/ha/flavor-vars.yml
+++ b/prototypes/xci/file/ha/flavor-vars.yml
@@ -0,0 +1,39 @@
+---
+host_info: {
+ 'opnfv': {
+ 'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
+ 'STORAGE_IP': '172.29.244.10'
+ },
+ 'controller00': {
+ 'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
+ 'STORAGE_IP': '172.29.244.11'
+ },
+ 'controller01': {
+ 'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
+ 'STORAGE_IP': '172.29.244.12'
+ },
+ 'controller02': {
+ 'VLAN_IP': '192.168.122.5',
+ 'MGMT_IP': '172.29.236.13',
+ 'VXLAN_IP': '172.29.240.13',
+ 'STORAGE_IP': '172.29.244.13'
+ },
+ 'compute00': {
+ 'VLAN_IP': '192.168.122.6',
+ 'MGMT_IP': '172.29.236.14',
+ 'VXLAN_IP': '172.29.240.14',
+ 'STORAGE_IP': '172.29.244.14'
+ },
+ 'compute01': {
+ 'VLAN_IP': '192.168.122.7',
+ 'MGMT_IP': '172.29.236.15',
+ 'VXLAN_IP': '172.29.240.15',
+ 'STORAGE_IP': '172.29.244.15'
+ }
+}
diff --git a/prototypes/xci/file/ha/inventory b/prototypes/xci/file/ha/inventory
index e69de29bb..94b1d074d 100644
--- a/prototypes/xci/file/ha/inventory
+++ b/prototypes/xci/file/ha/inventory
@@ -0,0 +1,11 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+controller01 ansible_ssh_host=192.168.122.4
+controller02 ansible_ssh_host=192.168.122.5
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.6
+compute01 ansible_ssh_host=192.168.122.7
diff --git a/prototypes/xci/file/ha/openstack_user_config.yml b/prototypes/xci/file/ha/openstack_user_config.yml
index e69de29bb..09fb734c1 100644
--- a/prototypes/xci/file/ha/openstack_user_config.yml
+++ b/prototypes/xci/file/ha/openstack_user_config.yml
@@ -0,0 +1,254 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
diff --git a/prototypes/xci/file/ha/user_variables.yml b/prototypes/xci/file/ha/user_variables.yml
new file mode 100644
index 000000000..094cc8cd6
--- /dev/null
+++ b/prototypes/xci/file/ha/user_variables.yml
@@ -0,0 +1,28 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
diff --git a/prototypes/xci/file/mini/flavor-vars.yml b/prototypes/xci/file/mini/flavor-vars.yml
index e69de29bb..0d446ba20 100644
--- a/prototypes/xci/file/mini/flavor-vars.yml
+++ b/prototypes/xci/file/mini/flavor-vars.yml
@@ -0,0 +1,21 @@
+---
+host_info: {
+ 'opnfv': {
+ 'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
+ 'STORAGE_IP': '172.29.244.10'
+ },
+ 'controller00': {
+ 'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
+ 'STORAGE_IP': '172.29.244.11'
+ },
+ 'compute00': {
+ 'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
+ 'STORAGE_IP': '172.29.244.12'
+ }
+}
diff --git a/prototypes/xci/file/mini/inventory b/prototypes/xci/file/mini/inventory
index e69de29bb..eb73e5e34 100644
--- a/prototypes/xci/file/mini/inventory
+++ b/prototypes/xci/file/mini/inventory
@@ -0,0 +1,8 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
diff --git a/prototypes/xci/file/mini/openstack_user_config.yml b/prototypes/xci/file/mini/openstack_user_config.yml
index e69de29bb..f9ccee24f 100644
--- a/prototypes/xci/file/mini/openstack_user_config.yml
+++ b/prototypes/xci/file/mini/openstack_user_config.yml
@@ -0,0 +1,170 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/prototypes/xci/file/mini/user_variables.yml b/prototypes/xci/file/mini/user_variables.yml
new file mode 100644
index 000000000..7a0b8064d
--- /dev/null
+++ b/prototypes/xci/file/mini/user_variables.yml
@@ -0,0 +1,28 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
diff --git a/prototypes/xci/file/modules b/prototypes/xci/file/modules
deleted file mode 100644
index 60a517f18..000000000
--- a/prototypes/xci/file/modules
+++ /dev/null
@@ -1,8 +0,0 @@
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-bonding
-8021q
diff --git a/prototypes/xci/file/noha/flavor-vars.yml b/prototypes/xci/file/noha/flavor-vars.yml
index e69de29bb..3c69a34bb 100644
--- a/prototypes/xci/file/noha/flavor-vars.yml
+++ b/prototypes/xci/file/noha/flavor-vars.yml
@@ -0,0 +1,27 @@
+---
+host_info: {
+ 'opnfv': {
+ 'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
+ 'STORAGE_IP': '172.29.244.10'
+ },
+ 'controller00': {
+ 'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
+ 'STORAGE_IP': '172.29.244.11'
+ },
+ 'compute00': {
+ 'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
+ 'STORAGE_IP': '172.29.244.12'
+ },
+ 'compute01': {
+ 'VLAN_IP': '192.168.122.5',
+ 'MGMT_IP': '172.29.236.13',
+ 'VXLAN_IP': '172.29.240.13',
+ 'STORAGE_IP': '172.29.244.13'
+ }
+}
diff --git a/prototypes/xci/file/noha/inventory b/prototypes/xci/file/noha/inventory
index e69de29bb..b4f9f6d0c 100644
--- a/prototypes/xci/file/noha/inventory
+++ b/prototypes/xci/file/noha/inventory
@@ -0,0 +1,9 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
+compute01 ansible_ssh_host=192.168.122.5
diff --git a/prototypes/xci/file/noha/openstack_user_config.yml b/prototypes/xci/file/noha/openstack_user_config.yml
index e69de29bb..fb12655e7 100644
--- a/prototypes/xci/file/noha/openstack_user_config.yml
+++ b/prototypes/xci/file/noha/openstack_user_config.yml
@@ -0,0 +1,172 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/prototypes/xci/file/noha/user_variables.yml b/prototypes/xci/file/noha/user_variables.yml
new file mode 100644
index 000000000..7a0b8064d
--- /dev/null
+++ b/prototypes/xci/file/noha/user_variables.yml
@@ -0,0 +1,28 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
diff --git a/prototypes/xci/file/setup-openstack.yml b/prototypes/xci/file/setup-openstack.yml
index 48f156ad7..415c48993 100644
--- a/prototypes/xci/file/setup-openstack.yml
+++ b/prototypes/xci/file/setup-openstack.yml
@@ -1,5 +1,5 @@
---
-# Copyright 2017, Rackspace US, Inc. and others
+# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,7 +20,6 @@
- include: os-neutron-install.yml
- include: os-heat-install.yml
- include: os-horizon-install.yml
-- include: os-ceilometer-install.yml
-- include: os-aodh-install.yml
- include: os-swift-install.yml
- include: os-ironic-install.yml
+- include: os-tempest-install.yml
diff --git a/prototypes/xci/file/user_variables.yml b/prototypes/xci/file/user_variables.yml
deleted file mode 100644
index e69de29bb..000000000
--- a/prototypes/xci/file/user_variables.yml
+++ /dev/null
diff --git a/prototypes/xci/playbooks/configure-localhost.yml b/prototypes/xci/playbooks/configure-localhost.yml
index c1a0134d9..34b974cd1 100644
--- a/prototypes/xci/playbooks/configure-localhost.yml
+++ b/prototypes/xci/playbooks/configure-localhost.yml
@@ -11,6 +11,7 @@
remote_user: root
vars_files:
- ../var/{{ ansible_os_family }}.yml
+ - ../var/opnfv.yml
roles:
- role: remove-folders
- { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" }
@@ -20,18 +21,16 @@
path: "{{LOG_PATH}}"
state: directory
recurse: no
- # when the deployment is not aio, we use playbook, configure-targethosts.yml, to configure all the hosts
- - name: copy multihost playbook
- copy:
- src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-targethosts.yml"
- dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
- when: XCI_FLAVOR != "aio"
# when the deployment is aio, we overwrite and use playbook, configure-opnfvhost.yml, since everything gets installed on opnfv host
- name: copy aio playbook
copy:
src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-opnfvhost.yml"
dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
when: XCI_FLAVOR == "aio"
+ - name: copy flavor inventory
+ copy:
+ src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/inventory"
+ dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
- name: copy flavor vars
copy:
src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/flavor-vars.yml"
diff --git a/prototypes/xci/playbooks/configure-opnfvhost.yml b/prototypes/xci/playbooks/configure-opnfvhost.yml
index 44a3d6a78..64fcef0db 100644
--- a/prototypes/xci/playbooks/configure-opnfvhost.yml
+++ b/prototypes/xci/playbooks/configure-opnfvhost.yml
@@ -12,10 +12,13 @@
vars_files:
- ../var/{{ ansible_os_family }}.yml
- ../var/flavor-vars.yml
+ - ../var/opnfv.yml
roles:
- role: remove-folders
- { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" }
- { role: clone-repository, project: "openstack/openstack-ansible", repo: "{{ OPENSTACK_OSA_GIT_URL }}", dest: "{{ OPENSTACK_OSA_PATH }}", version: "{{ OPENSTACK_OSA_VERSION }}" }
+ # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
+ - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/opnfv.interface.j2", dest: "/etc/network/interfaces" }
tasks:
- name: generate SSH keys
shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
@@ -35,6 +38,16 @@
shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/user_variables.yml {{OPENSTACK_OSA_ETC_PATH}}"
- name: copy cinder.yml
shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/cinder.yml {{OPENSTACK_OSA_ETC_PATH}}/env.d"
+ # TODO: We need to get rid of this as soon as the issue is fixed upstream
+ - name: change the haproxy state from disable to enable
+ replace:
+ dest: "{{OPENSTACK_OSA_PATH}}/playbooks/os-keystone-install.yml"
+ regexp: '(\s+)haproxy_state: disabled'
+ replace: '\1haproxy_state: enabled'
+ - name: copy OPNFV OpenStack playbook
+ shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
+ - name: copy OPNFV role requirements
+ shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}"
- name: bootstrap ansible on opnfv host
command: "/bin/bash ./scripts/bootstrap-ansible.sh"
args:
@@ -43,22 +56,6 @@
command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
args:
chdir: "{{OPENSTACK_OSA_PATH}}/scripts"
- - name: copy OPNFV OpenStack playbook
- shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
- - name: copy OPNFV role requirements
- shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}"
- # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
- # TODO: convert this into a role
- - name: configure network for ubuntu xenial
- template:
- src: ../template/opnfv.interface.j2
- dest: /etc/network/interfaces
- notify:
- - restart ubuntu xenial network service
- when: ansible_distribution_release == "xenial"
- handlers:
- - name: restart ubuntu xenial network service
- shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
- hosts: localhost
remote_user: root
tasks:
diff --git a/prototypes/xci/playbooks/configure-targethosts.yml b/prototypes/xci/playbooks/configure-targethosts.yml
new file mode 100644
index 000000000..50da1f223
--- /dev/null
+++ b/prototypes/xci/playbooks/configure-targethosts.yml
@@ -0,0 +1,36 @@
+---
+- hosts: all
+ remote_user: root
+ tasks:
+ - name: add public key to host
+ copy:
+ src: ../file/authorized_keys
+ dest: /root/.ssh/authorized_keys
+
+- hosts: controller
+ remote_user: root
+ vars_files:
+ - ../var/{{ ansible_os_family }}.yml
+ - ../var/flavor-vars.yml
+ roles:
+ # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
+ - { role: configure-network, src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+ # we need to force sync time with ntp or the nodes will be out of sync timewise
+ - role: synchronize-time
+
+- hosts: compute
+ remote_user: root
+ vars_files:
+ - ../var/{{ ansible_os_family }}.yml
+ - ../var/flavor-vars.yml
+ roles:
+ # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
+ - { role: configure-network, src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+ # we need to force sync time with ntp or the nodes will be out of sync timewise
+ - role: synchronize-time
+
+- hosts: compute00
+ remote_user: root
+ # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
+ roles:
+ - role: configure-nfs
diff --git a/prototypes/xci/playbooks/provision-vm-nodes.yml b/prototypes/xci/playbooks/provision-vm-nodes.yml
new file mode 100644
index 000000000..9a32d0bfc
--- /dev/null
+++ b/prototypes/xci/playbooks/provision-vm-nodes.yml
@@ -0,0 +1,32 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ remote_user: root
+ vars_files:
+ - ../var/{{ ansible_os_family }}.yml
+ - ../var/opnfv.yml
+ roles:
+ # using these roles here ensures that we can reuse this playbook in different context
+ - role: remove-folders
+ - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" }
+ - { role: clone-repository, project: "opnfv/bifrost", repo: "{{ OPENSTACK_BIFROST_GIT_URL }}", dest: "{{ OPENSTACK_BIFROST_PATH }}", version: "{{ OPENSTACK_BIFROST_VERSION }}" }
+ tasks:
+ - name: combine opnfv/releng and openstack/bifrost scripts/playbooks
+ copy:
+ src: "{{ OPNFV_RELENG_PATH }}/prototypes/bifrost/"
+ dest: "{{ OPENSTACK_BIFROST_PATH }}"
+ - name: destroy VM nodes created by previous deployment
+ command: "/bin/bash ./scripts/destroy-env.sh"
+ args:
+ chdir: "{{ OPENSTACK_BIFROST_PATH }}"
+ - name: create and provision VM nodes for the flavor {{ XCI_FLAVOR }}
+ command: "/bin/bash ./scripts/bifrost-provision.sh"
+ args:
+ chdir: "{{ OPENSTACK_BIFROST_PATH }}"
diff --git a/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
new file mode 100644
index 000000000..aafadf712
--- /dev/null
+++ b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this role needs to be adjusted for different distros
+- block:
+ - name: configure modules
+ lineinfile:
+ dest: /etc/modules
+ state: present
+ create: yes
+ line: "8021q"
+ - name: add modules
+ modprobe:
+ name: 8021q
+ state: present
+ - name: ensure glean rules are removed
+ file:
+ path: "/etc/udev/rules.d/99-glean.rules"
+ state: absent
+ - name: ensure interfaces.d folder is empty
+ shell: "/bin/rm -rf /etc/network/interfaces.d/*"
+ - name: ensure interfaces file is updated
+ template:
+ src: "{{ src }}"
+ dest: "{{ dest }}"
+ - name: restart network service
+ shell: "/sbin/ifconfig {{ interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
+ when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
new file mode 100644
index 000000000..c52da0bf3
--- /dev/null
+++ b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this is for xenial and needs to be adjusted for different distros
+- block:
+ - name: make NFS directories
+ file:
+ dest: "{{ item }}"
+ mode: 0777
+ state: directory
+ with_items:
+ - "/images"
+ - "/volumes"
+ - name: configure NFS service
+ lineinfile:
+ dest: /etc/services
+ state: present
+ create: yes
+ line: "{{ item }}"
+ with_items:
+ - "nfs 2049/tcp"
+ - "nfs 2049/udp"
+ - name: configure NFS exports
+ lineinfile:
+ dest: /etc/exports
+ state: present
+ create: yes
+ line: "{{ item }}"
+ with_items:
+ - "/images *(rw,sync,no_subtree_check,no_root_squash)"
+ - "/volumes *(rw,sync,no_subtree_check,no_root_squash)"
+ # TODO: the service name might be different on other distros and needs to be adjusted
+ - name: restart ubuntu xenial NFS service
+ service:
+ name: nfs-kernel-server
+ state: restarted
+ when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml b/prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml
index fb321dfde..ac8c0f7dc 100644
--- a/prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml
+++ b/prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml
@@ -14,6 +14,7 @@
recurse: no
with_items:
- "{{ OPNFV_RELENG_PATH }}"
+ - "{{ OPENSTACK_BIFROST_PATH }}"
- "{{ OPENSTACK_OSA_PATH }}"
- "{{ OPENSTACK_OSA_ETC_PATH }}"
- "{{ LOG_PATH }} "
diff --git a/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml b/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml
new file mode 100644
index 000000000..5c39d897b
--- /dev/null
+++ b/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this role needs to be adjusted for different distros
+- block:
+ - name: restart chrony
+ service:
+ name: chrony
+ state: restarted
+ - name: synchronize time
+ shell: "chronyc -a 'burst 4/4' && chronyc -a makestep"
+ when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/template/compute.interface.j2 b/prototypes/xci/template/compute.interface.j2
index 1719f6a08..094544c3b 100644
--- a/prototypes/xci/template/compute.interface.j2
+++ b/prototypes/xci/template/compute.interface.j2
@@ -1,29 +1,25 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
# The loopback network interface
auto lo
iface lo inet loopback
-
# Physical interface
-auto ens3
-iface ens3 inet manual
+auto {{ interface }}
+iface {{ interface }} inet manual
# Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
- vlan-raw-device ens3
+auto {{ interface }}.10
+iface {{ interface }}.10 inet manual
+ vlan-raw-device {{ interface }}
# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
- vlan-raw-device ens3
+auto {{ interface }}.30
+iface {{ interface }}.30 inet manual
+ vlan-raw-device {{ interface }}
-# Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
- vlan-raw-device ens3
+# Storage network VLAN interface
+auto {{ interface }}.20
+iface {{ interface }}.20 inet manual
+ vlan-raw-device {{ interface }}
# Container/Host management bridge
auto br-mgmt
@@ -31,7 +27,7 @@ iface br-mgmt inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3.10
+ bridge_ports {{ interface }}.10
address {{host_info[inventory_hostname].MGMT_IP}}
netmask 255.255.252.0
@@ -41,7 +37,7 @@ iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3.30
+ bridge_ports {{ interface }}.30
address {{host_info[inventory_hostname].VXLAN_IP}}
netmask 255.255.252.0
@@ -51,10 +47,11 @@ iface br-vlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3
+ bridge_ports {{ interface }}
address {{host_info[inventory_hostname].VLAN_IP}}
netmask 255.255.255.0
gateway 192.168.122.1
+ dns-nameserver 8.8.8.8 8.8.4.4
offload-sg off
# Create veth pair, don't bomb if already exists
pre-up ip link add br-vlan-veth type veth peer name eth12 || true
@@ -65,22 +62,12 @@ iface br-vlan inet static
post-down ip link del br-vlan-veth || true
bridge_ports br-vlan-veth
-# Add an additional address to br-vlan
-iface br-vlan inet static
- # Flat network default gateway
- # -- This needs to exist somewhere for network reachability
- # -- from the router namespace for floating IP paths.
- # -- Putting this here is primarily for tempest to work.
- address {{host_info[inventory_hostname].VLAN_IP_SECOND}}
- netmask 255.255.252.0
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# compute1 Storage bridge
+# OpenStack Storage bridge
auto br-storage
iface br-storage inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3.20
+ bridge_ports {{ interface }}.20
address {{host_info[inventory_hostname].STORAGE_IP}}
netmask 255.255.252.0
diff --git a/prototypes/xci/template/controller.interface.j2 b/prototypes/xci/template/controller.interface.j2
index 74aeea99d..638e78e18 100644
--- a/prototypes/xci/template/controller.interface.j2
+++ b/prototypes/xci/template/controller.interface.j2
@@ -1,28 +1,25 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
# The loopback network interface
auto lo
iface lo inet loopback
# Physical interface
-auto ens3
-iface ens3 inet manual
+auto {{ interface }}
+iface {{ interface }} inet manual
# Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
- vlan-raw-device ens3
+auto {{ interface }}.10
+iface {{ interface }}.10 inet manual
+ vlan-raw-device {{ interface }}
# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
- vlan-raw-device ens3
+auto {{ interface }}.30
+iface {{ interface }}.30 inet manual
+ vlan-raw-device {{ interface }}
# Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
- vlan-raw-device ens3
+auto {{ interface }}.20
+iface {{ interface }}.20 inet manual
+ vlan-raw-device {{ interface }}
# Container/Host management bridge
auto br-mgmt
@@ -30,23 +27,19 @@ iface br-mgmt inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3.10
+ bridge_ports {{ interface }}.10
address {{host_info[inventory_hostname].MGMT_IP}}
netmask 255.255.252.0
# OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3.30
+ bridge_ports {{ interface }}.30
+ address {{host_info[inventory_hostname].VXLAN_IP}}
+ netmask 255.255.252.0
# OpenStack Networking VLAN bridge
auto br-vlan
@@ -54,18 +47,18 @@ iface br-vlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3
+ bridge_ports {{ interface }}
address {{host_info[inventory_hostname].VLAN_IP}}
netmask 255.255.255.0
gateway 192.168.122.1
dns-nameserver 8.8.8.8 8.8.4.4
-# compute1 Storage bridge
+# OpenStack Storage bridge
auto br-storage
iface br-storage inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3.20
+ bridge_ports {{ interface }}.20
address {{host_info[inventory_hostname].STORAGE_IP}}
netmask 255.255.252.0
diff --git a/prototypes/xci/template/opnfv.interface.j2 b/prototypes/xci/template/opnfv.interface.j2
index 74aeea99d..e9f8649c6 100644
--- a/prototypes/xci/template/opnfv.interface.j2
+++ b/prototypes/xci/template/opnfv.interface.j2
@@ -1,28 +1,25 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
# The loopback network interface
auto lo
iface lo inet loopback
# Physical interface
-auto ens3
-iface ens3 inet manual
+auto {{ interface }}
+iface {{ interface }} inet manual
# Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
- vlan-raw-device ens3
+auto {{ interface }}.10
+iface {{ interface }}.10 inet manual
+ vlan-raw-device {{ interface }}
# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
- vlan-raw-device ens3
+auto {{ interface }}.30
+iface {{ interface }}.30 inet manual
+ vlan-raw-device {{ interface }}
# Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
- vlan-raw-device ens3
+auto {{ interface }}.20
+iface {{ interface }}.20 inet manual
+ vlan-raw-device {{ interface }}
# Container/Host management bridge
auto br-mgmt
@@ -30,23 +27,19 @@ iface br-mgmt inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3.10
+ bridge_ports {{ interface }}.10
address {{host_info[inventory_hostname].MGMT_IP}}
netmask 255.255.252.0
# OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3.30
+ bridge_ports {{ interface }}.30
+ address {{ host_info[inventory_hostname].VXLAN_IP }}
+ netmask 255.255.252.0
# OpenStack Networking VLAN bridge
auto br-vlan
@@ -54,18 +47,18 @@ iface br-vlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3
+ bridge_ports {{ interface }}
address {{host_info[inventory_hostname].VLAN_IP}}
netmask 255.255.255.0
gateway 192.168.122.1
dns-nameserver 8.8.8.8 8.8.4.4
-# compute1 Storage bridge
+# OpenStack Storage bridge
auto br-storage
iface br-storage inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
- bridge_ports ens3.20
+ bridge_ports {{ interface }}.20
address {{host_info[inventory_hostname].STORAGE_IP}}
netmask 255.255.252.0
diff --git a/prototypes/xci/var/Debian.yml b/prototypes/xci/var/Debian.yml
index e69de29bb..d13d08097 100644
--- a/prototypes/xci/var/Debian.yml
+++ b/prototypes/xci/var/Debian.yml
@@ -0,0 +1,11 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# this is the interface through which the VM nodes are connected to the libvirt "default" network
+interface: "ens3"
diff --git a/prototypes/xci/var/RedHat.yml b/prototypes/xci/var/RedHat.yml
new file mode 100644
index 000000000..6d03e0f32
--- /dev/null
+++ b/prototypes/xci/var/RedHat.yml
@@ -0,0 +1,10 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# this is a placeholder, intentionally left blank to be completed later on
diff --git a/prototypes/xci/var/Suse.yml b/prototypes/xci/var/Suse.yml
new file mode 100644
index 000000000..6d03e0f32
--- /dev/null
+++ b/prototypes/xci/var/Suse.yml
@@ -0,0 +1,10 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# this is a placeholder, intentionally left blank to be completed later on
diff --git a/prototypes/xci/var/opnfv.yml b/prototypes/xci/var/opnfv.yml
new file mode 100644
index 000000000..12cb55675
--- /dev/null
+++ b/prototypes/xci/var/opnfv.yml
@@ -0,0 +1,25 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+OPNFV_RELENG_GIT_URL: "{{ lookup('env','OPNFV_RELENG_GIT_URL') }}"
+OPNFV_RELENG_PATH: "{{ lookup('env','OPNFV_RELENG_PATH') }}"
+OPNFV_RELENG_VERSION: "{{ lookup('env','OPNFV_RELENG_VERSION') }}"
+OPENSTACK_BIFROST_GIT_URL: "{{ lookup('env','OPENSTACK_BIFROST_GIT_URL') }}"
+OPENSTACK_BIFROST_PATH: "{{ lookup('env','OPENSTACK_BIFROST_PATH') }}"
+OPENSTACK_BIFROST_VERSION: "{{ lookup('env','OPENSTACK_BIFROST_VERSION') }}"
+OPENSTACK_OSA_GIT_URL: "{{ lookup('env','OPENSTACK_OSA_GIT_URL') }}"
+OPENSTACK_OSA_PATH: "{{ lookup('env','OPENSTACK_OSA_PATH') }}"
+OPENSTACK_OSA_VERSION: "{{ lookup('env','OPENSTACK_OSA_VERSION') }}"
+OPENSTACK_OSA_ETC_PATH: "{{ lookup('env','OPENSTACK_OSA_ETC_PATH') }}"
+XCI_ANSIBLE_PIP_VERSION: "{{ lookup('env','XCI_ANSIBLE_PIP_VERSION') }}"
+XCI_FLAVOR: "{{ lookup('env','XCI_FLAVOR') }}"
+XCI_FLAVOR_ANSIBLE_FILE_PATH: "{{ lookup('env','XCI_FLAVOR_ANSIBLE_FILE_PATH') }}"
+XCI_LOOP: "{{ lookup('env','XCI_LOOP') }}"
+LOG_PATH: "{{ lookup('env','LOG_PATH') }}"
+OPNFV_HOST_IP: "{{ lookup('env','OPNFV_HOST_IP') }}"
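This var file simply maps the environment variables exported by the surrounding scripts into Ansible variables through the env lookup; nothing is defaulted here, so an unset variable comes through as an empty string. A hypothetical pre-flight check, not part of this change, that verifies the variables consumed above are actually set before the deployment starts:

    import os
    import sys

    # Names taken from prototypes/xci/var/opnfv.yml; fail early if any are unset.
    required = ["OPNFV_RELENG_GIT_URL", "OPNFV_RELENG_PATH", "OPNFV_RELENG_VERSION",
                "OPENSTACK_BIFROST_GIT_URL", "OPENSTACK_BIFROST_PATH",
                "OPENSTACK_OSA_GIT_URL", "OPENSTACK_OSA_PATH", "OPENSTACK_OSA_VERSION",
                "XCI_FLAVOR", "XCI_LOOP", "LOG_PATH", "OPNFV_HOST_IP"]
    missing = [name for name in required if not os.environ.get(name)]
    if missing:
        sys.exit("Error: unset environment variables: " + ", ".join(missing))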
diff --git a/prototypes/xci/xci-deploy.sh b/prototypes/xci/xci-deploy.sh
index 277206dc7..2fd9be022 100755
--- a/prototypes/xci/xci-deploy.sh
+++ b/prototypes/xci/xci-deploy.sh
@@ -2,30 +2,42 @@
set -o errexit
set -o nounset
set -o pipefail
-set -o xtrace
+#-------------------------------------------------------------------------------
# This script must run as root
+#-------------------------------------------------------------------------------
if [[ $(whoami) != "root" ]]; then
echo "Error: This script must be run as root!"
exit 1
fi
+#-------------------------------------------------------------------------------
+# Set environment variables
+#-------------------------------------------------------------------------------
+# The order of sourcing the variable files is significant, so please do not
+# change it or things might stop working.
+# - user-vars: variables that can be configured or overridden by the user.
+# - pinned-versions: versions to check out. These can be overridden if you want
+#   to use different/more recent versions of the tools, but you might end up
+#   using something that is not verified by OPNFV XCI.
+# - flavor-vars: settings for VM nodes for the chosen flavor.
+# - env-vars: variables for xci itself; you should not need to change or
+#   override any of them.
+#-------------------------------------------------------------------------------
# find where are we
XCI_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
# source user vars
source $XCI_PATH/config/user-vars
-
# source pinned versions
source $XCI_PATH/config/pinned-versions
-
# source flavor configuration
-source "$XCI_PATH/flavors/${XCI_FLAVOR}-vars"
-
+source "$XCI_PATH/config/${XCI_FLAVOR}-vars"
# source xci configuration
source $XCI_PATH/config/env-vars
-# log info to console
+#-------------------------------------------------------------------------------
+# Log info to console
+#-------------------------------------------------------------------------------
echo "Info: Starting XCI Deployment"
echo "Info: Deployment parameters"
echo "-------------------------------------------------------------------------"
@@ -36,41 +48,155 @@ echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
echo "-------------------------------------------------------------------------"
#-------------------------------------------------------------------------------
-# Cleanup the leftovers from the previous deployment
-#-------------------------------------------------------------------------------
-echo "Info: Cleaning up the previous deployment"
-$XCI_PATH/../bifrost/scripts/destroy-env.sh > /dev/null 2>&1
-/bin/rm -rf /opt/releng /opt/bifrost /opt/openstack-ansible
-
+# Install ansible on localhost
#-------------------------------------------------------------------------------
-# Clone the repositories and checkout the versions
-#-------------------------------------------------------------------------------
-echo "Info: Cloning repositories and checking out versions"
-git clone --quiet $OPNFV_RELENG_GIT_URL $OPNFV_RELENG_PATH && \
- cd $OPNFV_RELENG_PATH
-echo "Info: Cloned opnfv/releng. HEAD currently points at"
-echo " $(git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>')"
-git clone --quiet $OPENSTACK_BIFROST_GIT_URL $OPENSTACK_BIFROST_PATH && \
- cd $OPENSTACK_BIFROST_PATH
-echo "Info: Cloned openstack/bifrost. HEAD currently points at"
-echo " $(git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>')"
+pip install ansible==$XCI_ANSIBLE_PIP_VERSION
-#-------------------------------------------------------------------------------
-# Combine opnfv and upstream scripts/playbooks
-#-------------------------------------------------------------------------------
-echo "Info: Combining opnfv/releng and opestack/bifrost scripts/playbooks"
-/bin/cp -rf $OPNFV_RELENG_PATH/prototypes/bifrost/* $OPENSTACK_BIFROST_PATH/
+# TODO: The xci playbooks can be combined into a single playbook; this will be done later.
#-------------------------------------------------------------------------------
# Start provisioning VM nodes
#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng and openstack/bifrost repositories
+# - combines opnfv/releng and openstack/bifrost scripts/playbooks
+# - destroys VMs and removes the ironic DB, leases, and logs
+# - creates and provisions VMs for the chosen flavor
+#-------------------------------------------------------------------------------
echo "Info: Starting provisining VM nodes using openstack/bifrost"
-echo " This might take between 10 to 20 minutes depending on the flavor and the host"
echo "-------------------------------------------------------------------------"
-cd $OPENSTACK_BIFROST_PATH
-STARTTIME=$(date +%s)
-./scripts/bifrost-provision.sh
-ENDTIME=$(date +%s)
+cd $XCI_PATH/playbooks
+ansible-playbook $ANSIBLE_VERBOSITY -i inventory provision-vm-nodes.yml
echo "-----------------------------------------------------------------------"
echo "Info: VM nodes are provisioned!"
-echo "Info: It took $(($ENDTIME - $STARTTIME)) seconds to provising the VM nodes"
+source $OPENSTACK_BIFROST_PATH/env-vars
+ironic node-list
+echo
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng repository
+# - creates log directory
+# - copies flavor files such as playbook, inventory, and var file
+#-------------------------------------------------------------------------------
+echo "Info: Configuring localhost for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+cd $XCI_PATH/playbooks
+ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost host for openstack-ansible"
+
+#-------------------------------------------------------------------------------
+# Configure openstack-ansible deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng and openstack/openstack-ansible repositories
+# - configures network
+# - generates/prepares ssh keys
+# - bootstraps ansible
+# - copies flavor files to be used by openstack-ansible
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+cd $OPNFV_RELENG_PATH/prototypes/xci/playbooks
+ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for openstack-ansible"
+
+#-------------------------------------------------------------------------------
+# Skip the rest if the flavor is aio since the target host for aio is opnfv
+#-------------------------------------------------------------------------------
+if [[ $XCI_FLAVOR == "aio" ]]; then
+ echo "xci: aio has been installed"
+ exit 0
+fi
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for openstack-ansible
+#-------------------------------------------------------------------------------
+# This playbook
+# - adds public keys to target hosts
+# - configures network
+# - configures nfs
+#-------------------------------------------------------------------------------
+echo "Info: Configuring target hosts for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+cd $OPNFV_RELENG_PATH/prototypes/xci/playbooks
+ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-targethosts.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured target hosts"
+
+#-------------------------------------------------------------------------------
+# Set up target hosts for openstack-ansible
+#-------------------------------------------------------------------------------
+# This is an openstack-ansible playbook. Check the upstream documentation for details.
+#-------------------------------------------------------------------------------
+echo "Info: Setting up target hosts for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP openstack-ansible \
+ $OPENSTACK_OSA_PATH/playbooks/setup-hosts.yml" | \
+ tee $LOG_PATH/setup-hosts.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-hosts.log; then
+ echo "Error: OpenStack node setup failed!"
+ exit 1
+fi
+echo "Info: Set up target hosts for openstack-ansible successfuly"
+
+#-------------------------------------------------------------------------------
+# Set up infrastructure
+#-------------------------------------------------------------------------------
+# This is an openstack-ansible playbook. Check the upstream documentation for details.
+#-------------------------------------------------------------------------------
+echo "Info: Setting up infrastructure"
+echo "-----------------------------------------------------------------------"
+echo "xci: running ansible playbook setup-infrastructure.yml"
+sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP openstack-ansible \
+    $OPENSTACK_OSA_PATH/playbooks/setup-infrastructure.yml" | \
+ tee $LOG_PATH/setup-infrastructure.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-infrastructure.log; then
+    echo "Error: OpenStack infrastructure setup failed!"
+ exit 1
+fi
+
+#-------------------------------------------------------------------------------
+# Verify database cluster
+#-------------------------------------------------------------------------------
+echo "Info: Verifying database cluster"
+echo "-----------------------------------------------------------------------"
+sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP ansible -i $OPENSTACK_OSA_PATH/playbooks/inventory/ \
+ galera_container -m shell \
+ -a "mysql -h localhost -e 'show status like \"%wsrep_cluster_%\";'"" \
+ | tee $LOG_PATH/galera.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'FAILED' $LOG_PATH/galera.log; then
+ echo "Error: Database cluster verification failed!"
+ exit 1
+fi
+echo "Info: Database cluster verification successful!"
+
+#-------------------------------------------------------------------------------
+# Install OpenStack
+#-------------------------------------------------------------------------------
+# This is an openstack-ansible playbook. Check the upstream documentation for details.
+#-------------------------------------------------------------------------------
+echo "Info: Installing OpenStack on target hosts"
+echo "-----------------------------------------------------------------------"
+sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP openstack-ansible \
+ $OPENSTACK_OSA_PATH/playbooks/setup-openstack.yml" | \
+ tee $LOG_PATH/opnfv-setup-openstack.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'failed=1\|unreachable=1' $LOG_PATH/opnfv-setup-openstack.log; then
+ echo "Error: OpenStack installation failed!"
+ exit 1
+fi
+echo "Info: OpenStack installation is successfully completed!"
diff --git a/setup.py b/setup.py
new file mode 100644
index 000000000..2d9246ec4
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+from setuptools import setup
+
+setup(
+ name="opnfv",
+ version="master",
+ url="https://www.opnfv.org",
+)
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 000000000..e9f5fbb0b
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,34 @@
+# Tox (http://tox.testrun.org/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py27
+skipsdist = True
+
+[testenv]
+usedevelop = True
+setenv=
+ HOME = {envtmpdir}
+ PYTHONPATH = {toxinidir}
+
+[testenv:jjb]
+deps =
+ -rjjb/test-requirements.txt
+commands=
+ jenkins-jobs test -o job_output -r jjb/
+
+[testenv:modules]
+deps=
+ -rmodules/requirements.txt
+ -rmodules/test-requirements.txt
+commands =
+ nosetests -w modules \
+ --with-xunit \
+ --xunit-file=modules/nosetests.xml \
+ --cover-package=opnfv \
+ --with-coverage \
+ --cover-xml \
+ --cover-html \
+ tests/unit
diff --git a/utils/create_pod_file.py b/utils/create_pod_file.py
new file mode 100644
index 000000000..22943fc97
--- /dev/null
+++ b/utils/create_pod_file.py
@@ -0,0 +1,102 @@
+import os
+import yaml
+from opnfv.deployment import factory
+import argparse
+
+
+parser = argparse.ArgumentParser(description='OPNFV POD Info Generator')
+
+parser.add_argument("-t", "--INSTALLER_TYPE", help="Give INSTALLER_TYPE")
+parser.add_argument("-i", "--INSTALLER_IP", help="Give INSTALLER_IP")
+parser.add_argument("-u", "--user", help="Give username of this pod")
+parser.add_argument("-k", "--key", help="Give key file of the user")
+parser.add_argument("-p", "--password", help="Give password of the user")
+parser.add_argument("-f", "--filepath", help="Give dest path of output file")
+args = parser.parse_args()
+
+
+def check_params():
+ """
+    Check all the CLI inputs. INSTALLER_TYPE, INSTALLER_IP, user and the
+    filepath of the output file must all be given.
+    Either key or password must be given.
+ """
+ if not args.INSTALLER_TYPE or not args.INSTALLER_IP or not args.user:
+ print("INSTALLER_TYPE, INSTALLER_IP and user are all needed.")
+ return False
+ if not args.key and not args.password:
+ print("key and password are all None. At least providing one.")
+ return False
+ if not args.filepath:
+ print("Must give the dest path of the output file.")
+ return False
+ return True
+
+
+def get_with_key():
+ """
+ Get handler of the nodes info with key file.
+ """
+ return factory.Factory.get_handler(args.INSTALLER_TYPE, args.INSTALLER_IP,
+ args.user, pkey_file=args.key)
+
+
+def get_with_passwd():
+ """
+ Get handler of the nodes info with password.
+ """
+ return factory.Factory.get_handler(args.INSTALLER_TYPE, args.INSTALLER_IP,
+ args.user, installer_pwd=args.password)
+
+
+def create_file(handler):
+ """
+ Create the yaml file of nodes info.
+    As Yardstick requires, node names must be node1, node2, ... and node1
+    must be a controller.
+    Compass uses the password of each node.
+    Other installers use the key file of each node.
+ """
+ if not os.path.exists(os.path.dirname(args.filepath)):
+        os.makedirs(os.path.dirname(args.filepath))
+ nodes = handler.nodes
+ node_list = []
+ index = 1
+ for node in nodes:
+ if node.roles[0].lower() == "controller":
+ node_info = {'name': "node%s" % index, 'role': node.roles[0],
+ 'ip': node.ip, 'user': 'root'}
+ node_list.append(node_info)
+ index += 1
+ for node in nodes:
+ if node.roles[0].lower() == "compute":
+ node_info = {'name': "node%s" % index, 'role': node.roles[0],
+ 'ip': node.ip, 'user': 'root'}
+ node_list.append(node_info)
+ index += 1
+ if args.INSTALLER_TYPE == 'compass':
+ for item in node_list:
+ item['password'] = 'root'
+ else:
+ for item in node_list:
+ item['key_filename'] = '/root/.ssh/id_rsa'
+ data = {'nodes': node_list}
+ with open(args.filepath, "w") as fw:
+ yaml.dump(data, fw)
+
+
+def main():
+ if not check_params():
+ return 1
+ if args.key:
+ handler = get_with_key()
+ else:
+ handler = get_with_passwd()
+ if not handler:
+ print("Error: failed to get the node's handler.")
+ return 1
+ create_file(handler)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/utils/test/reporting/img/danube.jpg b/utils/test/reporting/img/danube.jpg
index a5778356f..2d8e27b60 100644
--- a/utils/test/reporting/img/danube.jpg
+++ b/utils/test/reporting/img/danube.jpg
Binary files differ
diff --git a/utils/test/testapi/opnfv_testapi/common/check.py b/utils/test/testapi/opnfv_testapi/common/check.py
new file mode 100644
index 000000000..be4b1df12
--- /dev/null
+++ b/utils/test/testapi/opnfv_testapi/common/check.py
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import functools
+
+from tornado import web, gen
+
+from opnfv_testapi.common import raises, message
+
+
+def authenticate(method):
+ @web.asynchronous
+ @gen.coroutine
+ @functools.wraps(method)
+ def wrapper(self, *args, **kwargs):
+ if self.auth:
+ try:
+ token = self.request.headers['X-Auth-Token']
+ except KeyError:
+ raises.Unauthorized(message.unauthorized())
+ query = {'access_token': token}
+ check = yield self._eval_db_find_one(query, 'tokens')
+ if not check:
+ raises.Forbidden(message.invalid_token())
+ ret = yield gen.coroutine(method)(self, *args, **kwargs)
+ raise gen.Return(ret)
+ return wrapper
+
+
+def not_exist(xstep):
+ @functools.wraps(xstep)
+ def wrap(self, *args, **kwargs):
+ query = kwargs.get('query')
+ data = yield self._eval_db_find_one(query)
+ if not data:
+ raises.NotFound(message.not_found(self.table, query))
+ ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
+ raise gen.Return(ret)
+
+ return wrap
+
+
+def no_body(xstep):
+ @functools.wraps(xstep)
+ def wrap(self, *args, **kwargs):
+ if self.json_args is None:
+ raises.BadRequest(message.no_body())
+ ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+ raise gen.Return(ret)
+
+ return wrap
+
+
+def miss_fields(xstep):
+ @functools.wraps(xstep)
+ def wrap(self, *args, **kwargs):
+ fields = kwargs.get('miss_fields')
+ if fields:
+ for miss in fields:
+ miss_data = self.json_args.get(miss)
+ if miss_data is None or miss_data == '':
+ raises.BadRequest(message.missing(miss))
+ ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+ raise gen.Return(ret)
+ return wrap
+
+
+def carriers_exist(xstep):
+ @functools.wraps(xstep)
+ def wrap(self, *args, **kwargs):
+ carriers = kwargs.get('carriers')
+ if carriers:
+ for table, query in carriers:
+ exist = yield self._eval_db_find_one(query(), table)
+ if not exist:
+ raises.Forbidden(message.not_found(table, query()))
+ ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+ raise gen.Return(ret)
+ return wrap
+
+
+def new_not_exists(xstep):
+ @functools.wraps(xstep)
+ def wrap(self, *args, **kwargs):
+ query = kwargs.get('query')
+ if query:
+ to_data = yield self._eval_db_find_one(query())
+ if to_data:
+ raises.Forbidden(message.exist(self.table, query()))
+ ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+ raise gen.Return(ret)
+ return wrap
+
+
+def updated_one_not_exist(xstep):
+ @functools.wraps(xstep)
+ def wrap(self, data, *args, **kwargs):
+ db_keys = kwargs.get('db_keys')
+ query = self._update_query(db_keys, data)
+ if query:
+ to_data = yield self._eval_db_find_one(query)
+ if to_data:
+ raises.Forbidden(message.exist(self.table, query))
+ ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
+ raise gen.Return(ret)
+ return wrap
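check.py turns the validation steps that used to live inline in GenericApiHandler into stackable decorators: each wrapper either aborts the request through raises.* or yields to the next one. The real wiring is in resources/handlers.py further down; a short sketch with a hypothetical handler, where decorator order matters because authenticate is outermost and therefore runs first:

    from opnfv_testapi.common import check
    from opnfv_testapi.resources.handlers import GenericApiHandler

    class ExampleCLHandler(GenericApiHandler):      # hypothetical handler
        @check.authenticate
        @check.no_body
        @check.miss_fields
        @check.new_not_exists
        def _create(self, **kwargs):
            pass  # reached only after every check above has passed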
diff --git a/utils/test/testapi/opnfv_testapi/common/message.py b/utils/test/testapi/opnfv_testapi/common/message.py
new file mode 100644
index 000000000..98536ff4b
--- /dev/null
+++ b/utils/test/testapi/opnfv_testapi/common/message.py
@@ -0,0 +1,46 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+not_found_base = 'Could Not Find'
+exist_base = 'Already Exists'
+
+
+def no_body():
+ return 'No Body'
+
+
+def not_found(key, value):
+ return '{} {} [{}]'.format(not_found_base, key, value)
+
+
+def missing(name):
+ return '{} Missing'.format(name)
+
+
+def exist(key, value):
+ return '{} [{}] {}'.format(key, value, exist_base)
+
+
+def bad_format(error):
+ return 'Bad Format [{}]'.format(error)
+
+
+def unauthorized():
+ return 'No Authentication Header'
+
+
+def invalid_token():
+ return 'Invalid Token'
+
+
+def no_update():
+ return 'Nothing to update'
+
+
+def must_int(name):
+ return '{} must be int'.format(name)
diff --git a/utils/test/testapi/opnfv_testapi/common/raises.py b/utils/test/testapi/opnfv_testapi/common/raises.py
new file mode 100644
index 000000000..ec6b8a564
--- /dev/null
+++ b/utils/test/testapi/opnfv_testapi/common/raises.py
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import httplib
+
+from tornado import web
+
+
+class Raiser(object):
+ code = httplib.OK
+
+ def __init__(self, reason):
+ raise web.HTTPError(self.code, reason)
+
+
+class BadRequest(Raiser):
+ code = httplib.BAD_REQUEST
+
+
+class Forbidden(Raiser):
+ code = httplib.FORBIDDEN
+
+
+class NotFound(Raiser):
+ code = httplib.NOT_FOUND
+
+
+class Unauthorized(Raiser):
+ code = httplib.UNAUTHORIZED
+
+
+class CodeTBD(object):
+ def __init__(self, code, reason):
+ raise web.HTTPError(code, reason)
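Each Raiser subclass raises a tornado web.HTTPError from its constructor, so a handler aborts a request simply by instantiating one with a message helper. A usage sketch (the pod name is made up); the try/except is only there to make the snippet run standalone:

    from tornado import web
    from opnfv_testapi.common import message, raises

    try:
        raises.NotFound(message.not_found('pods', {'name': 'LF-POD-1'}))
    except web.HTTPError as error:
        print(error.status_code, error.log_message)  # 404 plus the formatted message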
diff --git a/utils/test/testapi/opnfv_testapi/resources/handlers.py b/utils/test/testapi/opnfv_testapi/resources/handlers.py
index bf8a92b54..955fbbef7 100644
--- a/utils/test/testapi/opnfv_testapi/resources/handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/handlers.py
@@ -21,14 +21,15 @@
##############################################################################
from datetime import datetime
-import functools
-import httplib
import json
from tornado import gen
from tornado import web
import models
+from opnfv_testapi.common import check
+from opnfv_testapi.common import message
+from opnfv_testapi.common import raises
from opnfv_testapi.tornado_swagger import swagger
DEFAULT_REPRESENTATION = "application/json"
@@ -56,9 +57,7 @@ class GenericApiHandler(web.RequestHandler):
try:
self.json_args = json.loads(self.request.body)
except (ValueError, KeyError, TypeError) as error:
- raise web.HTTPError(httplib.BAD_REQUEST,
- "Bad Json format [{}]".
- format(error))
+ raises.BadRequest(message.bad_format(str(error)))
def finish_request(self, json_object=None):
if json_object:
@@ -74,51 +73,20 @@ class GenericApiHandler(web.RequestHandler):
cls_data = self.table_cls.from_dict(data)
return cls_data.format_http()
- def authenticate(method):
- @web.asynchronous
- @gen.coroutine
- @functools.wraps(method)
- def wrapper(self, *args, **kwargs):
- if self.auth:
- try:
- token = self.request.headers['X-Auth-Token']
- except KeyError:
- raise web.HTTPError(httplib.UNAUTHORIZED,
- "No Authentication Header.")
- query = {'access_token': token}
- check = yield self._eval_db_find_one(query, 'tokens')
- if not check:
- raise web.HTTPError(httplib.FORBIDDEN,
- "Invalid Token.")
- ret = yield gen.coroutine(method)(self, *args, **kwargs)
- raise gen.Return(ret)
- return wrapper
-
- @authenticate
- def _create(self, miss_checks, db_checks, **kwargs):
+ @check.authenticate
+ @check.no_body
+ @check.miss_fields
+ @check.carriers_exist
+ @check.new_not_exists
+ def _create(self, **kwargs):
"""
:param miss_checks: [miss1, miss2]
:param db_checks: [(table, exist, query, error)]
"""
- if self.json_args is None:
- raise web.HTTPError(httplib.BAD_REQUEST, "no body")
-
data = self.table_cls.from_dict(self.json_args)
- for miss in miss_checks:
- miss_data = data.__getattribute__(miss)
- if miss_data is None or miss_data == '':
- raise web.HTTPError(httplib.BAD_REQUEST,
- '{} missing'.format(miss))
-
for k, v in kwargs.iteritems():
data.__setattr__(k, v)
- for table, exist, query, error in db_checks:
- check = yield self._eval_db_find_one(query(data), table)
- if (exist and not check) or (not exist and check):
- code, message = error(data)
- raise web.HTTPError(code, message)
-
if self.table != 'results':
data.creation_date = datetime.now()
_id = yield self._eval_db(self.table, 'insert', data.format(),
@@ -150,55 +118,27 @@ class GenericApiHandler(web.RequestHandler):
@web.asynchronous
@gen.coroutine
- def _get_one(self, query):
- data = yield self._eval_db_find_one(query)
- if data is None:
- raise web.HTTPError(httplib.NOT_FOUND,
- "[{}] not exist in table [{}]"
- .format(query, self.table))
+ @check.not_exist
+ def _get_one(self, data, query=None):
self.finish_request(self.format_data(data))
- @authenticate
- def _delete(self, query):
- data = yield self._eval_db_find_one(query)
- if data is None:
- raise web.HTTPError(httplib.NOT_FOUND,
- "[{}] not exit in table [{}]"
- .format(query, self.table))
-
+ @check.authenticate
+ @check.not_exist
+ def _delete(self, data, query=None):
yield self._eval_db(self.table, 'remove', query)
self.finish_request()
- @authenticate
- def _update(self, query, db_keys):
- if self.json_args is None:
- raise web.HTTPError(httplib.BAD_REQUEST, "No payload")
-
- # check old data exist
- from_data = yield self._eval_db_find_one(query)
- if from_data is None:
- raise web.HTTPError(httplib.NOT_FOUND,
- "{} could not be found in table [{}]"
- .format(query, self.table))
-
- data = self.table_cls.from_dict(from_data)
- # check new data exist
- equal, new_query = self._update_query(db_keys, data)
- if not equal:
- to_data = yield self._eval_db_find_one(new_query)
- if to_data is not None:
- raise web.HTTPError(httplib.FORBIDDEN,
- "{} already exists in table [{}]"
- .format(new_query, self.table))
-
- # we merge the whole document """
- edit_request = self._update_requests(data)
-
- """ Updating the DB """
- yield self._eval_db(self.table, 'update', query, edit_request,
+ @check.authenticate
+ @check.no_body
+ @check.not_exist
+ @check.updated_one_not_exist
+ def _update(self, data, query=None, **kwargs):
+ data = self.table_cls.from_dict(data)
+ update_req = self._update_requests(data)
+ yield self._eval_db(self.table, 'update', query, update_req,
check_keys=False)
- edit_request['_id'] = str(data._id)
- self.finish_request(edit_request)
+ update_req['_id'] = str(data._id)
+ self.finish_request(update_req)
def _update_requests(self, data):
request = dict()
@@ -206,7 +146,7 @@ class GenericApiHandler(web.RequestHandler):
request = self._update_request(request, k, v,
data.__getattribute__(k))
if not request:
- raise web.HTTPError(httplib.FORBIDDEN, "Nothing to update")
+ raises.Forbidden(message.no_update())
edit_request = data.format()
edit_request.update(request)
@@ -231,13 +171,13 @@ class GenericApiHandler(web.RequestHandler):
equal = True
for key in keys:
new = self.json_args.get(key)
- old = data.__getattribute__(key)
+ old = data.get(key)
if new is None:
new = old
elif new != old:
equal = False
query[key] = new
- return equal, query
+ return query if not equal else dict()
def _eval_db(self, table, method, *args, **kwargs):
exec_collection = self.db.__getattr__(table)
diff --git a/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py b/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py
index fd9ce3eb5..e21841d33 100644
--- a/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py
@@ -6,8 +6,6 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import httplib
-
import handlers
from opnfv_testapi.tornado_swagger import swagger
import pod_models
@@ -42,16 +40,10 @@ class PodCLHandler(GenericPodHandler):
@raise 403: pod already exists
@raise 400: body or name not provided
"""
- def query(data):
- return {'name': data.name}
-
- def error(data):
- message = '{} already exists as a pod'.format(data.name)
- return httplib.FORBIDDEN, message
-
- miss_checks = ['name']
- db_checks = [(self.table, False, query, error)]
- self._create(miss_checks, db_checks)
+ def query():
+ return {'name': self.json_args.get('name')}
+ miss_fields = ['name']
+ self._create(miss_fields=miss_fields, query=query)
class PodGURHandler(GenericPodHandler):
@@ -63,9 +55,7 @@ class PodGURHandler(GenericPodHandler):
@return 200: pod exist
@raise 404: pod not exist
"""
- query = dict()
- query['name'] = pod_name
- self._get_one(query)
+ self._get_one(query={'name': pod_name})
def delete(self, pod_name):
""" Remove a POD
diff --git a/utils/test/testapi/opnfv_testapi/resources/project_handlers.py b/utils/test/testapi/opnfv_testapi/resources/project_handlers.py
index 087bb8af2..d79cd3b61 100644
--- a/utils/test/testapi/opnfv_testapi/resources/project_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/project_handlers.py
@@ -6,7 +6,6 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import httplib
import handlers
from opnfv_testapi.tornado_swagger import swagger
@@ -44,16 +43,10 @@ class ProjectCLHandler(GenericProjectHandler):
@raise 403: project already exists
@raise 400: body or name not provided
"""
- def query(data):
- return {'name': data.name}
-
- def error(data):
- message = '{} already exists as a project'.format(data.name)
- return httplib.FORBIDDEN, message
-
- miss_checks = ['name']
- db_checks = [(self.table, False, query, error)]
- self._create(miss_checks, db_checks)
+ def query():
+ return {'name': self.json_args.get('name')}
+ miss_fields = ['name']
+ self._create(miss_fields=miss_fields, query=query)
class ProjectGURHandler(GenericProjectHandler):
@@ -65,7 +58,7 @@ class ProjectGURHandler(GenericProjectHandler):
@return 200: project exist
@raise 404: project not exist
"""
- self._get_one({'name': project_name})
+ self._get_one(query={'name': project_name})
@swagger.operation(nickname="updateProjectByName")
def put(self, project_name):
@@ -81,7 +74,7 @@ class ProjectGURHandler(GenericProjectHandler):
"""
query = {'name': project_name}
db_keys = ['name']
- self._update(query, db_keys)
+ self._update(query=query, db_keys=db_keys)
@swagger.operation(nickname='deleteProjectByName')
def delete(self, project_name):
@@ -90,4 +83,4 @@ class ProjectGURHandler(GenericProjectHandler):
@return 200: delete success
@raise 404: project not exist
"""
- self._delete({'name': project_name})
+ self._delete(query={'name': project_name})
diff --git a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
index 44b9f8c07..214706f5f 100644
--- a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
@@ -8,11 +8,11 @@
##############################################################################
from datetime import datetime
from datetime import timedelta
-import httplib
from bson import objectid
-from tornado import web
+from opnfv_testapi.common import message
+from opnfv_testapi.common import raises
from opnfv_testapi.resources import handlers
from opnfv_testapi.resources import result_models
from opnfv_testapi.tornado_swagger import swagger
@@ -30,8 +30,7 @@ class GenericResultHandler(handlers.GenericApiHandler):
try:
value = int(value)
except:
- raise web.HTTPError(httplib.BAD_REQUEST,
- '{} must be int'.format(key))
+ raises.BadRequest(message.must_int(key))
return value
def set_query(self):
@@ -127,7 +126,9 @@ class ResultsCLHandler(GenericResultHandler):
if last is not None:
last = self.get_int('last', last)
- self._list(self.set_query(), sort=[('start_date', -1)], last=last)
+ self._list(query=self.set_query(),
+ sort=[('start_date', -1)],
+ last=last)
@swagger.operation(nickname="createTestResult")
def post(self):
@@ -141,33 +142,21 @@ class ResultsCLHandler(GenericResultHandler):
@raise 404: pod/project/testcase not exist
@raise 400: body/pod_name/project_name/case_name not provided
"""
- def pod_query(data):
- return {'name': data.pod_name}
+ def pod_query():
+ return {'name': self.json_args.get('pod_name')}
- def pod_error(data):
- message = 'Could not find pod [{}]'.format(data.pod_name)
- return httplib.NOT_FOUND, message
+ def project_query():
+ return {'name': self.json_args.get('project_name')}
- def project_query(data):
- return {'name': data.project_name}
+ def testcase_query():
+ return {'project_name': self.json_args.get('project_name'),
+ 'name': self.json_args.get('case_name')}
- def project_error(data):
- message = 'Could not find project [{}]'.format(data.project_name)
- return httplib.NOT_FOUND, message
-
- def testcase_query(data):
- return {'project_name': data.project_name, 'name': data.case_name}
-
- def testcase_error(data):
- message = 'Could not find testcase [{}] in project [{}]'\
- .format(data.case_name, data.project_name)
- return httplib.NOT_FOUND, message
-
- miss_checks = ['pod_name', 'project_name', 'case_name']
- db_checks = [('pods', True, pod_query, pod_error),
- ('projects', True, project_query, project_error),
- ('testcases', True, testcase_query, testcase_error)]
- self._create(miss_checks, db_checks)
+ miss_fields = ['pod_name', 'project_name', 'case_name']
+ carriers = [('pods', pod_query),
+ ('projects', project_query),
+ ('testcases', testcase_query)]
+ self._create(miss_fields=miss_fields, carriers=carriers)
class ResultsGURHandler(GenericResultHandler):
@@ -181,7 +170,7 @@ class ResultsGURHandler(GenericResultHandler):
"""
query = dict()
query["_id"] = objectid.ObjectId(result_id)
- self._get_one(query)
+ self._get_one(query=query)
@swagger.operation(nickname="updateTestResultById")
def put(self, result_id):
@@ -197,4 +186,4 @@ class ResultsGURHandler(GenericResultHandler):
"""
query = {'_id': objectid.ObjectId(result_id)}
db_keys = []
- self._update(query, db_keys)
+ self._update(query=query, db_keys=db_keys)
diff --git a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
index a2856dbd7..5d420a56e 100644
--- a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
@@ -1,8 +1,7 @@
import functools
-import httplib
-
-from tornado import web
+from opnfv_testapi.common import message
+from opnfv_testapi.common import raises
from opnfv_testapi.resources import handlers
import opnfv_testapi.resources.scenario_models as models
from opnfv_testapi.tornado_swagger import swagger
@@ -65,7 +64,7 @@ class ScenariosCLHandler(GenericScenarioHandler):
query['installers'] = {'$elemMatch': elem_query}
return query
- self._list(_set_query())
+ self._list(query=_set_query())
@swagger.operation(nickname="createScenario")
def post(self):
@@ -79,16 +78,10 @@ class ScenariosCLHandler(GenericScenarioHandler):
@raise 403: scenario already exists
@raise 400: body or name not provided
"""
- def query(data):
- return {'name': data.name}
-
- def error(data):
- message = '{} already exists as a scenario'.format(data.name)
- return httplib.FORBIDDEN, message
-
- miss_checks = ['name']
- db_checks = [(self.table, False, query, error)]
- self._create(miss_checks=miss_checks, db_checks=db_checks)
+ def query():
+ return {'name': self.json_args.get('name')}
+ miss_fields = ['name']
+ self._create(miss_fields=miss_fields, query=query)
class ScenarioGURHandler(GenericScenarioHandler):
@@ -100,7 +93,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
@return 200: scenario exist
@raise 404: scenario not exist
"""
- self._get_one({'name': name})
+ self._get_one(query={'name': name})
pass
@swagger.operation(nickname="updateScenarioByName")
@@ -117,7 +110,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
"""
query = {'name': name}
db_keys = ['name']
- self._update(query, db_keys)
+ self._update(query=query, db_keys=db_keys)
@swagger.operation(nickname="deleteScenarioByName")
def delete(self, name):
@@ -127,19 +120,16 @@ class ScenarioGURHandler(GenericScenarioHandler):
@raise 404: scenario not exist:
"""
- query = {'name': name}
- self._delete(query)
+ self._delete(query={'name': name})
def _update_query(self, keys, data):
query = dict()
- equal = True
if self._is_rename():
new = self._term.get('name')
- if data.name != new:
- equal = False
+ if data.get('name') != new:
query['name'] = new
- return equal, query
+ return query
def _update_requests(self, data):
updates = {
@@ -185,8 +175,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
def _update_requests_rename(self, data):
data.name = self._term.get('name')
if not data.name:
- raise web.HTTPError(httplib.BAD_REQUEST,
- "new scenario name is not provided")
+ raises.BadRequest(message.missing('name'))
def _update_requests_add_installer(self, data):
data.installers.append(models.ScenarioInstaller.from_dict(self._term))
diff --git a/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py b/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py
index 1211a0573..9399326f0 100644
--- a/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py
@@ -6,7 +6,6 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import httplib
from opnfv_testapi.resources import handlers
from opnfv_testapi.resources import testcase_models
@@ -31,9 +30,7 @@ class TestcaseCLHandler(GenericTestcaseHandler):
empty list is no testcase exist in this project
@rtype: L{TestCases}
"""
- query = dict()
- query['project_name'] = project_name
- self._list(query)
+ self._list(query={'project_name': project_name})
@swagger.operation(nickname="createTestCase")
def post(self, project_name):
@@ -48,28 +45,18 @@ class TestcaseCLHandler(GenericTestcaseHandler):
or testcase already exists in this project
@raise 400: body or name not provided
"""
- def p_query(data):
- return {'name': data.project_name}
-
- def tc_query(data):
- return {
- 'project_name': data.project_name,
- 'name': data.name
- }
-
- def p_error(data):
- message = 'Could not find project [{}]'.format(data.project_name)
- return httplib.FORBIDDEN, message
-
- def tc_error(data):
- message = '{} already exists as a testcase in project {}'\
- .format(data.name, data.project_name)
- return httplib.FORBIDDEN, message
+ def project_query():
+ return {'name': project_name}
- miss_checks = ['name']
- db_checks = [(self.db_projects, True, p_query, p_error),
- (self.db_testcases, False, tc_query, tc_error)]
- self._create(miss_checks, db_checks, project_name=project_name)
+ def testcase_query():
+ return {'project_name': project_name,
+ 'name': self.json_args.get('name')}
+ miss_fields = ['name']
+ carriers = [(self.db_projects, project_query)]
+ self._create(miss_fields=miss_fields,
+ carriers=carriers,
+ query=testcase_query,
+ project_name=project_name)
class TestcaseGURHandler(GenericTestcaseHandler):
@@ -85,7 +72,7 @@ class TestcaseGURHandler(GenericTestcaseHandler):
query = dict()
query['project_name'] = project_name
query["name"] = case_name
- self._get_one(query)
+ self._get_one(query=query)
@swagger.operation(nickname="updateTestCaseByName")
def put(self, project_name, case_name):
@@ -103,7 +90,7 @@ class TestcaseGURHandler(GenericTestcaseHandler):
"""
query = {'project_name': project_name, 'name': case_name}
db_keys = ['name', 'project_name']
- self._update(query, db_keys)
+ self._update(query=query, db_keys=db_keys)
@swagger.operation(nickname='deleteTestCaseByName')
def delete(self, project_name, case_name):
@@ -113,4 +100,4 @@ class TestcaseGURHandler(GenericTestcaseHandler):
@raise 404: testcase not exist
"""
query = {'project_name': project_name, 'name': case_name}
- self._delete(query)
+ self._delete(query=query)
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/executor.py b/utils/test/testapi/opnfv_testapi/tests/unit/executor.py
new file mode 100644
index 000000000..b30c3258b
--- /dev/null
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/executor.py
@@ -0,0 +1,83 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import functools
+import httplib
+
+
+def create(excepted_status, excepted_response):
+ def _create(create_request):
+ @functools.wraps(create_request)
+ def wrap(self):
+ request = create_request(self)
+ status, body = self.create(request)
+ if excepted_status == httplib.OK:
+ getattr(self, excepted_response)(body)
+ else:
+ self.assertIn(excepted_response, body)
+ return wrap
+ return _create
+
+
+def get(excepted_status, excepted_response):
+ def _get(get_request):
+ @functools.wraps(get_request)
+ def wrap(self):
+ request = get_request(self)
+ status, body = self.get(request)
+ if excepted_status == httplib.OK:
+ getattr(self, excepted_response)(body)
+ else:
+ self.assertIn(excepted_response, body)
+ return wrap
+ return _get
+
+
+def update(excepted_status, excepted_response):
+ def _update(update_request):
+ @functools.wraps(update_request)
+ def wrap(self):
+ request, resource = update_request(self)
+ status, body = self.update(request, resource)
+ if excepted_status == httplib.OK:
+ getattr(self, excepted_response)(request, body)
+ else:
+ self.assertIn(excepted_response, body)
+ return wrap
+ return _update
+
+
+def delete(excepted_status, excepted_response):
+ def _delete(delete_request):
+ @functools.wraps(delete_request)
+ def wrap(self):
+ request = delete_request(self)
+ if isinstance(request, tuple):
+ status, body = self.delete(request[0], *(request[1]))
+ else:
+ status, body = self.delete(request)
+ if excepted_status == httplib.OK:
+ getattr(self, excepted_response)(body)
+ else:
+ self.assertIn(excepted_response, body)
+ return wrap
+ return _delete
+
+
+def query(excepted_status, excepted_response, number=0):
+ def _query(get_request):
+ @functools.wraps(get_request)
+ def wrap(self):
+ request = get_request(self)
+ status, body = self.query(request)
+ if excepted_status == httplib.OK:
+ getattr(self, excepted_response)(body, number)
+ else:
+ self.assertIn(excepted_response, body)
+ return wrap
+ return _query
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py
index b955f4a5a..a6e733914 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py
@@ -12,9 +12,9 @@ from os import path
import mock
from tornado import testing
-import fake_pymongo
from opnfv_testapi.cmd import server
from opnfv_testapi.resources import models
+from opnfv_testapi.tests.unit import fake_pymongo
class TestBase(testing.AsyncHTTPTestCase):
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py
index 7c43fca62..1ebc96f3b 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py
@@ -12,7 +12,7 @@ from tornado import gen
from tornado import testing
from tornado import web
-import fake_pymongo
+from opnfv_testapi.tests.unit import fake_pymongo
class MyTest(testing.AsyncHTTPTestCase):
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py
index cec90d8a5..0ed348df9 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py
@@ -9,8 +9,10 @@
import httplib
import unittest
+from opnfv_testapi.common import message
from opnfv_testapi.resources import pod_models
-import test_base as base
+from opnfv_testapi.tests.unit import executor
+from opnfv_testapi.tests.unit import test_base as base
class TestPodBase(base.TestBase):
@@ -35,48 +37,47 @@ class TestPodBase(base.TestBase):
class TestPodCreate(TestPodBase):
+ @executor.create(httplib.BAD_REQUEST, message.no_body())
def test_withoutBody(self):
- (code, body) = self.create()
- self.assertEqual(code, httplib.BAD_REQUEST)
+ return None
+ @executor.create(httplib.BAD_REQUEST, message.missing('name'))
def test_emptyName(self):
- req_empty = pod_models.PodCreateRequest('')
- (code, body) = self.create(req_empty)
- self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('name missing', body)
+ return pod_models.PodCreateRequest('')
+ @executor.create(httplib.BAD_REQUEST, message.missing('name'))
def test_noneName(self):
- req_none = pod_models.PodCreateRequest(None)
- (code, body) = self.create(req_none)
- self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('name missing', body)
+ return pod_models.PodCreateRequest(None)
+ @executor.create(httplib.OK, 'assert_create_body')
def test_success(self):
- code, body = self.create_d()
- self.assertEqual(code, httplib.OK)
- self.assert_create_body(body)
+ return self.req_d
+ @executor.create(httplib.FORBIDDEN, message.exist_base)
def test_alreadyExist(self):
self.create_d()
- code, body = self.create_d()
- self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn('already exists', body)
+ return self.req_d
class TestPodGet(TestPodBase):
+ def setUp(self):
+ super(TestPodGet, self).setUp()
+ self.create_d()
+ self.create_e()
+
+ @executor.get(httplib.NOT_FOUND, message.not_found_base)
def test_notExist(self):
- code, body = self.get('notExist')
- self.assertEqual(code, httplib.NOT_FOUND)
+ return 'notExist'
+ @executor.get(httplib.OK, 'assert_get_body')
def test_getOne(self):
- self.create_d()
- code, body = self.get(self.req_d.name)
- self.assert_get_body(body)
+ return self.req_d.name
+ @executor.get(httplib.OK, '_assert_list')
def test_list(self):
- self.create_d()
- self.create_e()
- code, body = self.get()
+ return None
+
+ def _assert_list(self, body):
self.assertEqual(len(body.pods), 2)
for pod in body.pods:
if self.req_d.name == pod.name:
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py
index 75b2d5260..9143f8a8a 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py
@@ -9,8 +9,9 @@
import httplib
import unittest
+from opnfv_testapi.common import message
from opnfv_testapi.resources import project_models
-import test_base as base
+from opnfv_testapi.tests.unit import test_base as base
class TestProjectBase(base.TestBase):
@@ -43,13 +44,13 @@ class TestProjectCreate(TestProjectBase):
req_empty = project_models.ProjectCreateRequest('')
(code, body) = self.create(req_empty)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('name missing', body)
+ self.assertIn(message.missing('name'), body)
def test_noneName(self):
req_none = project_models.ProjectCreateRequest(None)
(code, body) = self.create(req_none)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('name missing', body)
+ self.assertIn(message.missing('name'), body)
def test_success(self):
(code, body) = self.create_d()
@@ -60,7 +61,7 @@ class TestProjectCreate(TestProjectBase):
self.create_d()
(code, body) = self.create_d()
self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn('already exists', body)
+ self.assertIn(message.exist_base, body)
class TestProjectGet(TestProjectBase):
@@ -99,13 +100,13 @@ class TestProjectUpdate(TestProjectBase):
self.create_e()
code, body = self.update(self.req_e, self.req_d.name)
self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn("already exists", body)
+ self.assertIn(message.exist_base, body)
def test_noUpdate(self):
self.create_d()
code, body = self.update(self.req_d, self.req_d.name)
self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn("Nothing to update", body)
+ self.assertIn(message.no_update(), body)
def test_success(self):
self.create_d()
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py
index 05220f1d2..940279cd4 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py
@@ -11,11 +11,12 @@ from datetime import datetime, timedelta
import httplib
import unittest
+from opnfv_testapi.common import message
from opnfv_testapi.resources import pod_models
from opnfv_testapi.resources import project_models
from opnfv_testapi.resources import result_models
from opnfv_testapi.resources import testcase_models
-import test_base as base
+from opnfv_testapi.tests.unit import test_base as base
class Details(object):
@@ -135,49 +136,49 @@ class TestResultCreate(TestResultBase):
def test_nobody(self):
(code, body) = self.create(None)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('no body', body)
+ self.assertIn(message.no_body(), body)
def test_podNotProvided(self):
req = self.req_d
req.pod_name = None
(code, body) = self.create(req)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('pod_name missing', body)
+ self.assertIn(message.missing('pod_name'), body)
def test_projectNotProvided(self):
req = self.req_d
req.project_name = None
(code, body) = self.create(req)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('project_name missing', body)
+ self.assertIn(message.missing('project_name'), body)
def test_testcaseNotProvided(self):
req = self.req_d
req.case_name = None
(code, body) = self.create(req)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('case_name missing', body)
+ self.assertIn(message.missing('case_name'), body)
def test_noPod(self):
req = self.req_d
req.pod_name = 'notExistPod'
(code, body) = self.create(req)
- self.assertEqual(code, httplib.NOT_FOUND)
- self.assertIn('Could not find pod', body)
+ self.assertEqual(code, httplib.FORBIDDEN)
+ self.assertIn(message.not_found_base, body)
def test_noProject(self):
req = self.req_d
req.project_name = 'notExistProject'
(code, body) = self.create(req)
- self.assertEqual(code, httplib.NOT_FOUND)
- self.assertIn('Could not find project', body)
+ self.assertEqual(code, httplib.FORBIDDEN)
+ self.assertIn(message.not_found_base, body)
def test_noTestcase(self):
req = self.req_d
req.case_name = 'notExistTestcase'
(code, body) = self.create(req)
- self.assertEqual(code, httplib.NOT_FOUND)
- self.assertIn('Could not find testcase', body)
+ self.assertEqual(code, httplib.FORBIDDEN)
+ self.assertIn(message.not_found_base, body)
def test_success(self):
(code, body) = self.create_d()
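The test_noPod, test_noProject and test_noTestcase cases above now expect httplib.FORBIDDEN instead of httplib.NOT_FOUND when a result references a pod, project or test case that does not exist, and check the body against message.not_found_base. A hypothetical helper illustrating the handler-side behaviour these tests imply (check_references, raise_error, the db layout and the collection names are illustrative only and do not come from this patch):

# Illustrative sketch only: rejects a result whose referenced pod/project/
# testcase is missing with 403 FORBIDDEN and a "not found" style message.
import httplib

from opnfv_testapi.common import message


def check_references(db, req, raise_error):
    # db is assumed to behave like a pymongo database; raise_error is a
    # callback that aborts the request with the given code and message.
    references = (('pods', req.pod_name),
                  ('projects', req.project_name),
                  ('testcases', req.case_name))
    for collection, name in references:
        if db[collection].find_one({'name': name}) is None:
            raise_error(httplib.FORBIDDEN,
                        '{} {}'.format(message.not_found_base, name))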
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
index ab2c34b31..b232bc168 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
@@ -5,8 +5,9 @@ import httplib
import json
import os
+from opnfv_testapi.common import message
import opnfv_testapi.resources.scenario_models as models
-import test_base as base
+from opnfv_testapi.tests.unit import test_base as base
class TestScenarioBase(base.TestBase):
@@ -66,13 +67,13 @@ class TestScenarioCreate(TestScenarioBase):
req_empty = models.ScenarioCreateRequest('')
(code, body) = self.create(req_empty)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('name missing', body)
+ self.assertIn(message.missing('name'), body)
def test_noneName(self):
req_none = models.ScenarioCreateRequest(None)
(code, body) = self.create(req_none)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('name missing', body)
+ self.assertIn(message.missing('name'), body)
def test_success(self):
(code, body) = self.create_d()
@@ -83,7 +84,7 @@ class TestScenarioCreate(TestScenarioBase):
self.create_d()
(code, body) = self.create_d()
self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn('already exists', body)
+ self.assertIn(message.exist_base, body)
class TestScenarioGet(TestScenarioBase):
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py
index ec44fcae5..73c481986 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py
@@ -10,9 +10,10 @@ import copy
import httplib
import unittest
+from opnfv_testapi.common import message
from opnfv_testapi.resources import project_models
from opnfv_testapi.resources import testcase_models
-import test_base as base
+from opnfv_testapi.tests.unit import test_base as base
class TestCaseBase(base.TestBase):
@@ -84,19 +85,19 @@ class TestCaseCreate(TestCaseBase):
def test_noProject(self):
code, body = self.create(self.req_d, 'noProject')
self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn('Could not find project', body)
+ self.assertIn(message.not_found_base, body)
def test_emptyName(self):
req_empty = testcase_models.TestcaseCreateRequest('')
(code, body) = self.create(req_empty, self.project)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('name missing', body)
+ self.assertIn(message.missing('name'), body)
def test_noneName(self):
req_none = testcase_models.TestcaseCreateRequest(None)
(code, body) = self.create(req_none, self.project)
self.assertEqual(code, httplib.BAD_REQUEST)
- self.assertIn('name missing', body)
+ self.assertIn(message.missing('name'), body)
def test_success(self):
code, body = self.create_d()
@@ -107,7 +108,7 @@ class TestCaseCreate(TestCaseBase):
self.create_d()
code, body = self.create_d()
self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn('already exists', body)
+ self.assertIn(message.exist_base, body)
class TestCaseGet(TestCaseBase):
@@ -146,13 +147,13 @@ class TestCaseUpdate(TestCaseBase):
self.create_e()
code, body = self.update(self.update_e, self.req_d.name)
self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn("already exists", body)
+ self.assertIn(message.exist_base, body)
def test_noUpdate(self):
self.create_d()
code, body = self.update(self.update_d, self.req_d.name)
self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn("Nothing to update", body)
+ self.assertIn(message.no_update(), body)
def test_success(self):
self.create_d()
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py
index 9cc52a2f0..ca247a3b7 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py
@@ -8,10 +8,12 @@ import unittest
from tornado import web
-import fake_pymongo
+from opnfv_testapi.common import message
from opnfv_testapi.resources import project_models
from opnfv_testapi.router import url_mappings
-import test_base as base
+from opnfv_testapi.tests.unit import executor
+from opnfv_testapi.tests.unit import fake_pymongo
+from opnfv_testapi.tests.unit import test_base as base
class TestToken(base.TestBase):
@@ -31,22 +33,24 @@ class TestTokenCreateProject(TestToken):
fake_pymongo.tokens.insert({"access_token": "12345"})
self.basePath = '/api/v1/projects'
+ @executor.create(httplib.FORBIDDEN, message.invalid_token())
def test_projectCreateTokenInvalid(self):
self.headers['X-Auth-Token'] = '1234'
- code, body = self.create_d()
- self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn('Invalid Token.', body)
+ return self.req_d
+ @executor.create(httplib.UNAUTHORIZED, message.unauthorized())
def test_projectCreateTokenUnauthorized(self):
- self.headers.pop('X-Auth-Token')
- code, body = self.create_d()
- self.assertEqual(code, httplib.UNAUTHORIZED)
- self.assertIn('No Authentication Header.', body)
+ if 'X-Auth-Token' in self.headers:
+ self.headers.pop('X-Auth-Token')
+ return self.req_d
+ @executor.create(httplib.OK, '_create_success')
def test_projectCreateTokenSuccess(self):
self.headers['X-Auth-Token'] = '12345'
- code, body = self.create_d()
- self.assertEqual(code, httplib.OK)
+ return self.req_d
+
+ def _create_success(self, body):
+ self.assertIn('CreateResponse', str(type(body)))
class TestTokenDeleteProject(TestToken):
@@ -55,28 +59,25 @@ class TestTokenDeleteProject(TestToken):
self.req_d = project_models.ProjectCreateRequest('vping')
fake_pymongo.tokens.insert({"access_token": "12345"})
self.basePath = '/api/v1/projects'
-
- def test_projectDeleteTokenIvalid(self):
self.headers['X-Auth-Token'] = '12345'
self.create_d()
+
+ @executor.delete(httplib.FORBIDDEN, message.invalid_token())
+ def test_projectDeleteTokenIvalid(self):
self.headers['X-Auth-Token'] = '1234'
- code, body = self.delete(self.req_d.name)
- self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn('Invalid Token.', body)
+ return self.req_d.name
+ @executor.delete(httplib.UNAUTHORIZED, message.unauthorized())
def test_projectDeleteTokenUnauthorized(self):
- self.headers['X-Auth-Token'] = '12345'
- self.create_d()
self.headers.pop('X-Auth-Token')
- code, body = self.delete(self.req_d.name)
- self.assertEqual(code, httplib.UNAUTHORIZED)
- self.assertIn('No Authentication Header.', body)
+ return self.req_d.name
+ @executor.delete(httplib.OK, '_delete_success')
def test_projectDeleteTokenSuccess(self):
- self.headers['X-Auth-Token'] = '12345'
- self.create_d()
- code, body = self.delete(self.req_d.name)
- self.assertEqual(code, httplib.OK)
+ return self.req_d.name
+
+ def _delete_success(self, body):
+ self.assertEqual('', body)
class TestTokenUpdateProject(TestToken):
@@ -85,34 +86,28 @@ class TestTokenUpdateProject(TestToken):
self.req_d = project_models.ProjectCreateRequest('vping')
fake_pymongo.tokens.insert({"access_token": "12345"})
self.basePath = '/api/v1/projects'
-
- def test_projectUpdateTokenIvalid(self):
self.headers['X-Auth-Token'] = '12345'
self.create_d()
- code, body = self.get(self.req_d.name)
+
+ @executor.update(httplib.FORBIDDEN, message.invalid_token())
+ def test_projectUpdateTokenIvalid(self):
self.headers['X-Auth-Token'] = '1234'
req = project_models.ProjectUpdateRequest('newName', 'new description')
- code, body = self.update(req, self.req_d.name)
- self.assertEqual(code, httplib.FORBIDDEN)
- self.assertIn('Invalid Token.', body)
+ return req, self.req_d.name
+ @executor.update(httplib.UNAUTHORIZED, message.unauthorized())
def test_projectUpdateTokenUnauthorized(self):
- self.headers['X-Auth-Token'] = '12345'
- self.create_d()
- code, body = self.get(self.req_d.name)
self.headers.pop('X-Auth-Token')
req = project_models.ProjectUpdateRequest('newName', 'new description')
- code, body = self.update(req, self.req_d.name)
- self.assertEqual(code, httplib.UNAUTHORIZED)
- self.assertIn('No Authentication Header.', body)
+ return req, self.req_d.name
+ @executor.update(httplib.OK, '_update_success')
def test_projectUpdateTokenSuccess(self):
- self.headers['X-Auth-Token'] = '12345'
- self.create_d()
- code, body = self.get(self.req_d.name)
req = project_models.ProjectUpdateRequest('newName', 'new description')
- code, body = self.update(req, self.req_d.name)
- self.assertEqual(code, httplib.OK)
+ return req, self.req_d.name
+
+ def _update_success(self, request, body):
+ self.assertIn(request.name, body)
if __name__ == '__main__':
unittest.main()
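The executor decorators used in test_token.py (and in test_version.py below) come from opnfv_testapi/tests/unit/executor.py, which is outside these hunks. Judging from the call sites, each decorator takes an expected HTTP status plus either an expected message substring or the name of a follow-up assertion method, issues the corresponding request with whatever the wrapped test returns, and performs the checks. A rough sketch under those assumptions, not the actual module:

# Hypothetical sketch of opnfv_testapi/tests/unit/executor.py, inferred from
# how the decorators are applied above; the real implementation may differ.
import functools


def _assert(test, code, body, expected_code, item, *extra):
    test.assertEqual(code, expected_code)
    follow_up = getattr(test, item, None) if isinstance(item, basestring) else None
    if callable(follow_up):
        # item named a method on the test case, e.g. '_create_success'
        follow_up(*(extra + (body,)))
    else:
        # item is an expected message substring
        test.assertIn(item, body)


def create(expected_code, item):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            req = func(self, *args, **kwargs)
            code, body = self.create(req)
            _assert(self, code, body, expected_code, item)
        return wrapper
    return decorator


def delete(expected_code, item):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            name = func(self, *args, **kwargs)
            code, body = self.delete(name)
            _assert(self, code, body, expected_code, item)
        return wrapper
    return decorator


def update(expected_code, item):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            req, name = func(self, *args, **kwargs)
            code, body = self.update(req, name)
            # pass the request along so _update_success(request, body) works
            _assert(self, code, body, expected_code, item, req)
        return wrapper
    return decorator


def get(expected_code, item):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            query = func(self, *args, **kwargs)
            code, body = self.get(query) if query is not None else self.get()
            _assert(self, code, body, expected_code, item)
        return wrapper
    return decorator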
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py
index c8f3f5062..fff802ac8 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py
@@ -6,10 +6,12 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import httplib
import unittest
from opnfv_testapi.resources import models
-import test_base as base
+from opnfv_testapi.tests.unit import executor
+from opnfv_testapi.tests.unit import test_base as base
class TestVersionBase(base.TestBase):
@@ -20,12 +22,15 @@ class TestVersionBase(base.TestBase):
class TestVersion(TestVersionBase):
+ @executor.get(httplib.OK, '_get_success')
def test_success(self):
- code, body = self.get()
- self.assertEqual(200, code)
+ return None
+
+ def _get_success(self, body):
self.assertEqual(len(body.versions), 1)
self.assertEqual(body.versions[0].version, 'v1.0')
self.assertEqual(body.versions[0].description, 'basics')
+
if __name__ == '__main__':
unittest.main()