66 files changed, 1882 insertions, 552 deletions
diff --git a/.gitignore b/.gitignore
index 91ccabc4b..918e32154 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,3 +27,9 @@ wheels/
 venv/
 ENV/
 node_modules/
+.coverage
+=1.3.1
+cover/
+coverage.xml
+nosetests.xml
+testapi_venv/
diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh
index dc70488e7..d39217a8d 100755
--- a/jjb/apex/apex-deploy.sh
+++ b/jjb/apex/apex-deploy.sh
@@ -15,7 +15,7 @@
 if ! rpm -q wget > /dev/null; then
     sudo yum -y install wget
 fi
-if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then
+if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
     # Build is from a verify, use local build artifacts (not RPMs)
     cd $WORKSPACE/../${BUILD_DIRECTORY}
     WORKSPACE=$(pwd)
@@ -79,8 +79,8 @@ elif [[ "$DEPLOY_SCENARIO" == *gate* ]]; then
     fi
 fi
-# use local build for verify and csit promote
-if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then
+# use local build for verify and promote
+if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
     if [ ! -e "${WORKSPACE}/build/lib" ]; then
         ln -s ${WORKSPACE}/lib ${WORKSPACE}/build/lib
     fi
@@ -159,7 +159,7 @@ if [ "$OPNFV_CLEAN" == 'yes' ]; then
     else
         clean_opts=''
     fi
-    if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then
+    if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
         sudo CONFIG=${CONFIG} LIB=${LIB} ./clean.sh ${clean_opts}
     else
         sudo CONFIG=${CONFIG} LIB=${LIB} opnfv-clean ${clean_opts}
@@ -181,26 +181,19 @@ fi
 if [[ "$JOB_NAME" == *virtual* ]]; then
     # settings for virtual deployment
-    if [ "$IPV6_FLAG" == "True" ]; then
-        NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
-    elif echo ${DEPLOY_SCENARIO} | grep fdio; then
-        NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml"
-    else
-        NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
-    fi
     DEPLOY_CMD="${DEPLOY_CMD} -v"
+    if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
+        DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 14 --virtual-compute-ram 8"
+    fi
     if [[ "$JOB_NAME" == *csit* ]]; then
-        DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml --virtual-computes 2"
+        DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
+    fi
+    if [[ "$JOB_NAME" == *promote* ]]; then
+        DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2"
     fi
 else
     # settings for bare metal deployment
-    if [ "$IPV6_FLAG" == "True" ]; then
-        NETWORK_FILE="/root/network/network_settings_v6.yaml"
-    elif [[ "$JOB_NAME" == *master* ]]; then
-        NETWORK_FILE="/root/network/network_settings-master.yaml"
-    else
-        NETWORK_FILE="/root/network/network_settings.yaml"
-    fi
+    NETWORK_SETTINGS_DIR="/root/network"
     INVENTORY_FILE="/root/inventory/pod_settings.yaml"

     if ! sudo test -e "$INVENTORY_FILE"; then
@@ -211,6 +204,14 @@ else
     DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}"
 fi

+if [ "$IPV6_FLAG" == "True" ]; then
+    NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
+elif echo ${DEPLOY_SCENARIO} | grep fdio; then
+    NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml"
+else
+    NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
+fi
+
 # Check that network settings file exists
 if ! sudo test -e "$NETWORK_FILE"; then
     echo "ERROR: Required settings file missing: Network Settings file ${NETWORK_FILE}"
diff --git a/jjb/apex/apex-snapshot-create.sh b/jjb/apex/apex-snapshot-create.sh
index f146dd810..b2a39449e 100644
--- a/jjb/apex/apex-snapshot-create.sh
+++ b/jjb/apex/apex-snapshot-create.sh
@@ -13,6 +13,7 @@ set -o nounset
 set -o pipefail

 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
+SNAP_TYPE=$(echo ${JOB_NAME} | sed -n 's/^apex-\(.\+\)-promote.*$/\1/p')

 echo "Creating Apex snapshot..."
 echo "-------------------------"
@@ -81,17 +82,19 @@ sudo chown jenkins-ci:jenkins-ci *

 # tar up artifacts
 DATE=`date +%Y-%m-%d`
-tar czf ../apex-csit-snap-${DATE}.tar.gz .
+tar czf ../apex-${SNAP_TYPE}-snap-${DATE}.tar.gz .
 popd > /dev/null
 sudo rm -rf ${tmp_dir}
-echo "Snapshot saved as apex-csit-snap-${DATE}.tar.gz"
+echo "Snapshot saved as apex-${SNAP_TYPE}-snap-${DATE}.tar.gz"

 # update opnfv properties file
-curl -O -L http://$GS_URL/snapshot.properties
-sed -i '/^OPNFV_SNAP_URL=/{h;s#=.*#='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#};${x;/^$/{s##OPNFV_SNAP_URL='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#;H};x}' snapshot.properties
-snap_sha=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)
-sed -i '/^OPNFV_SNAP_SHA512SUM=/{h;s/=.*/='${snap_sha}'/};${x;/^$/{s//OPNFV_SNAP_SHA512SUM='${snap_sha}'/;H};x}' snapshot.properties
-echo "OPNFV_SNAP_URL=$GS_URL/apex-csit-snap-${DATE}.tar.gz"
-echo "OPNFV_SNAP_SHA512SUM=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)"
-echo "Updated properties file: "
-cat snapshot.properties
+if [ "$SNAP_TYPE" == 'csit' ]; then
+    curl -O -L http://$GS_URL/snapshot.properties
+    sed -i '/^OPNFV_SNAP_URL=/{h;s#=.*#='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#};${x;/^$/{s##OPNFV_SNAP_URL='${GS_URL}'/apex-csit-snap-'${DATE}'.tar.gz#;H};x}' snapshot.properties
+    snap_sha=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)
+    sed -i '/^OPNFV_SNAP_SHA512SUM=/{h;s/=.*/='${snap_sha}'/};${x;/^$/{s//OPNFV_SNAP_SHA512SUM='${snap_sha}'/;H};x}' snapshot.properties
+    echo "OPNFV_SNAP_URL=$GS_URL/apex-csit-snap-${DATE}.tar.gz"
+    echo "OPNFV_SNAP_SHA512SUM=$(sha512sum apex-csit-snap-${DATE}.tar.gz | cut -d' ' -f1)"
+    echo "Updated properties file: "
+    cat snapshot.properties
+fi
diff --git a/jjb/apex/apex-upload-artifact.sh b/jjb/apex/apex-upload-artifact.sh
index ef8ad5329..15aa67a6b 100755
--- a/jjb/apex/apex-upload-artifact.sh
+++ b/jjb/apex/apex-upload-artifact.sh
@@ -76,12 +76,15 @@ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log

 uploadsnap () {
     # Uploads snapshot artifact and updated properties file
     echo "Uploading snapshot artifacts"
-    gsutil cp $WORKSPACE/apex-csit-snap-`date +%Y-%m-%d`.tar.gz gs://$GS_URL/ > gsutil.iso.log
-    gsutil cp $WORKSPACE/snapshot.properties gs://$GS_URL/snapshot.properties > gsutil.latest.log
+    SNAP_TYPE=$(echo ${JOB_NAME} | sed -n 's/^apex-\(.\+\)-promote.*$/\1/p')
+    gsutil cp $WORKSPACE/apex-${SNAP_TYPE}-snap-`date +%Y-%m-%d`.tar.gz gs://$GS_URL/ > gsutil.iso.log
+    if [ "$SNAP_TYPE" == 'csit' ]; then
+        gsutil cp $WORKSPACE/snapshot.properties gs://$GS_URL/snapshot.properties > gsutil.latest.log
+    fi
     echo "Upload complete for Snapshot"
 }

-if echo $WORKSPACE | grep csit > /dev/null; then
+if echo $WORKSPACE | grep promote > /dev/null; then
     uploadsnap
 elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
     echo "Signing Key avaliable"
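Editor's note: the paired sed invocations in apex-snapshot-create.sh above implement an "update or append" idiom. The first block copies any matching `KEY=` line into the hold space and rewrites it in place; the `$` block then fires on the last line, and if the hold space was never filled the key was absent, so the line is appended instead. A minimal, self-contained sketch of the same idiom, using a hypothetical key and value rather than the exact Apex invocation:

#!/bin/bash
# Sketch of the update-or-append sed pattern; KEY/VAL/FILE are illustrative.
KEY=OPNFV_SNAP_URL
VAL=artifacts.example.org/apex-csit-snap-2017-01-01.tar.gz
FILE=snapshot.properties
printf 'OTHER=1\n' > "$FILE"   # start without the key; run twice to see update vs. append

# /^KEY=/ : save a copy in the hold space (h), then rewrite the value (s).
# $       : on the last line, exchange hold/pattern space (x); if the pattern
#           space is now empty the key never matched, so substitute in a fresh
#           KEY=VAL line, append it to the hold space (H), and exchange back.
sed -i '/^'"$KEY"'=/{h;s#=.*#='"$VAL"'#};${x;/^$/{s##'"$KEY"'='"$VAL"'#;H};x}' "$FILE"
cat "$FILE"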
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index ff9fbec14..126651e6c 100644
--- a/jjb/apex/apex.yml
+++ b/jjb/apex/apex.yml
@@ -14,6 +14,7 @@
         - 'apex-build-colorado'
         - 'apex-deploy-baremetal-os-odl_l2-fdio-ha-colorado'
         - 'apex-csit-promote-daily-{stream}'
+        - 'apex-fdio-promote-daily-{stream}'

 # stream:    branch with - in place of / (eg. stable-arno)
 # branch:    branch (eg. stable/arno)
@@ -34,6 +35,7 @@
         - 'os-nosdn-ovs-noha'
         - 'os-nosdn-fdio-noha'
         - 'os-nosdn-fdio-ha'
+        - 'os-odl_l2-fdio-noha'
         - 'os-odl_l2-fdio-ha'
         - 'os-odl_l2-netvirt_gbp_fdio-noha'
         - 'os-odl_l2-sfc-noha'
@@ -177,21 +179,6 @@
         - 'apex-unit-test'
         - 'apex-build'
         - trigger-builds:
-            - project: 'apex-deploy-virtual-os-nosdn-nofeature-ha-{stream}'
-              predefined-parameters: |
-                BUILD_DIRECTORY=apex-verify-{stream}
-                OPNFV_CLEAN=yes
-              git-revision: false
-              block: true
-              same-node: true
-        - trigger-builds:
-            - project: 'functest-apex-{verify-slave}-suite-{stream}'
-              predefined-parameters: |
-                DEPLOY_SCENARIO=os-nosdn-nofeature-ha
-                FUNCTEST_SUITE_NAME=healthcheck
-              block: true
-              same-node: true
-        - trigger-builds:
             - project: 'apex-deploy-virtual-os-odl_l3-nofeature-ha-{stream}'
               predefined-parameters: |
                 BUILD_DIRECTORY=apex-verify-{stream}
@@ -318,7 +305,7 @@
             blocking-jobs:
                 - 'apex-daily.*'
                 - 'apex-verify.*'
-                - 'apex-csit.*'
+                - 'apex-.*-promote.*'

     builders:
         - trigger-builds:
@@ -570,7 +557,7 @@
                 - 'apex-deploy.*'
                 - 'apex-build.*'
                 - 'apex-runner.*'
-                - 'apex-csit.*'
+                - 'apex-.*-promote.*'

     triggers:
         - 'apex-{stream}'
@@ -702,6 +689,67 @@
                 build-step-failure-threshold: 'never'
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
+        - trigger-builds:
+            - project: 'apex-deploy-baremetal-os-odl_l2-fdio-noha-{stream}'
+              predefined-parameters: |
+                BUILD_DIRECTORY=apex-build-{stream}/.build
+                OPNFV_CLEAN=yes
+              git-revision: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+              block: true
+        - trigger-builds:
+            - project: 'functest-apex-{daily-slave}-daily-{stream}'
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-fdio-noha
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+            - project: 'yardstick-apex-{slave}-daily-{stream}'
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-fdio-noha
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+            - project: 'apex-deploy-baremetal-os-odl_l2-fdio-ha-{stream}'
+              predefined-parameters: |
+                BUILD_DIRECTORY=apex-build-{stream}/.build
+                OPNFV_CLEAN=yes
+              git-revision: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+              block: true
+        - trigger-builds:
+            - project: 'functest-apex-{daily-slave}-daily-{stream}'
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-fdio-ha
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+            - project: 'yardstick-apex-{slave}-daily-{stream}'
+              predefined-parameters:
+                DEPLOY_SCENARIO=os-odl_l2-fdio-ha
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+
 # Colorado Build
 - job-template:
     name: 'apex-build-colorado'
@@ -914,6 +962,55 @@
         - shell:
             !include-raw-escape: ./apex-upload-artifact.sh

+# FDIO promote
+- job-template:
+    name: 'apex-fdio-promote-daily-{stream}'
+
+    # Job template for promoting FDIO snapshots
+    #
+    # Required Variables:
+    #     stream:    branch with - in place of / (eg. stable)
+    #     branch:    branch (eg. stable)
+    node: '{daily-slave}'
+
+    disabled: false
+
+    scm:
+        - git-scm
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - apex-parameter:
+            gs-pathname: '{gs-pathname}'
+
+    properties:
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-verify.*'
+                - 'apex-deploy.*'
+                - 'apex-build.*'
+                - 'apex-runner.*'
+                - 'apex-daily.*'
+
+    builders:
+        - 'apex-build'
+        - trigger-builds:
+            - project: 'apex-deploy-virtual-os-odl_l2-fdio-noha-{stream}'
+              predefined-parameters: |
+                BUILD_DIRECTORY=apex-fdio-promote-daily-{stream}
+                OPNFV_CLEAN=yes
+              git-revision: false
+              block: true
+              same-node: true
+        - shell:
+            !include-raw-escape: ./apex-snapshot-create.sh
+        - shell:
+            !include-raw-escape: ./apex-upload-artifact.sh
+
 - job-template:
     name: 'apex-gs-clean-{stream}'
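Editor's note: the promote job names defined in apex.yml are what apex-snapshot-create.sh and apex-upload-artifact.sh parse to pick the snapshot type, via the sed capture shown earlier. A small runnable sketch of that mapping:

#!/bin/bash
# \1 captures whatever sits between "apex-" and "-promote" in the job name.
for JOB_NAME in apex-csit-promote-daily-master apex-fdio-promote-daily-master; do
    SNAP_TYPE=$(echo ${JOB_NAME} | sed -n 's/^apex-\(.\+\)-promote.*$/\1/p')
    echo "$JOB_NAME -> apex-${SNAP_TYPE}-snap-$(date +%Y-%m-%d).tar.gz"
done
# apex-csit-promote-daily-master -> apex-csit-snap-<date>.tar.gz
# apex-fdio-promote-daily-master -> apex-fdio-snap-<date>.tar.gz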
diff --git a/jjb/bottlenecks/bottlenecks-ci-jobs.yml b/jjb/bottlenecks/bottlenecks-ci-jobs.yml
index a9ccd6977..2779e316b 100644
--- a/jjb/bottlenecks/bottlenecks-ci-jobs.yml
+++ b/jjb/bottlenecks/bottlenecks-ci-jobs.yml
@@ -72,7 +72,8 @@
     suite:
         - 'rubbos'
        - 'vstf'
-        - 'posca'
+        - 'posca_stress_traffic'
+        - 'posca_stress_ping'

     jobs:
         - 'bottlenecks-{installer}-{suite}-{pod}-daily-{stream}'
@@ -137,65 +138,14 @@
 - builder:
     name: bottlenecks-env-cleanup
     builders:
-        - shell: |
-            #!/bin/bash
-            set -e
-            [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
-
-            echo "Bottlenecks: docker containers/images cleaning up"
-            if [[ ! -z $(docker ps -a | grep opnfv/bottlenecks) ]]; then
-                echo "removing existing opnfv/bottlenecks containers"
-                docker ps -a | grep opnfv/bottlenecks | awk '{print $1}' | xargs docker rm -f >$redirect
-            fi
-
-            if [[ ! -z $(docker images | grep opnfv/bottlenecks) ]]; then
-                echo "Bottlenecks: docker images to remove:"
-                docker images | head -1 && docker images | grep opnfv/bottlenecks
-                image_tags=($(docker images | grep opnfv/bottlenecks | awk '{print $2}'))
-                for tag in "${image_tags[@]}"; do
-                    echo "Removing docker image opnfv/bottlenecks:$tag..."
-                    docker rmi opnfv/bottlenecks:$tag >$redirect
-                done
-            fi
+        - shell:
+            !include-raw: ./bottlenecks-cleanup.sh

 - builder:
     name: bottlenecks-run-suite
     builders:
-        - shell: |
-            #!/bin/bash
-            set -e
-            [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
-
-            echo "Bottlenecks: to pull image opnfv/bottlenecks:${DOCKER_TAG}"
-            docker pull opnfv/bottlenecks:$DOCKER_TAG >${redirect}
-
-            echo "Bottlenecks: docker start running"
-            opts="--privileged=true -id"
-            envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
-                  -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
-                  -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
-                  -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}"
-            cmd="sudo docker run ${opts} ${envs} opnfv/bottlenecks:${DOCKER_TAG} /bin/bash"
-            echo "Bottlenecks: docker cmd running ${cmd}"
-            ${cmd} >${redirect}
-
-            echo "Bottlenecks: obtain docker id"
-            container_id=$(docker ps | grep "opnfv/bottlenecks:${DOCKER_TAG}" | awk '{print $1}' | head -1)
-            if [ -z ${container_id} ]; then
-                echo "Cannot find opnfv/bottlenecks container ID ${container_id}. Please check if it exists."
-                docker ps -a
-                exit 1
-            fi
-
-            echo "Bottlenecks: to prepare openstack environment"
-            prepare_env="${REPO_DIR}/ci/prepare_env.sh"
-            echo "Bottlenecks: docker cmd running: ${prepare_env}"
-            sudo docker exec ${container_id} ${prepare_env}
-
-            echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
-            run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
-            echo "Bottlenecks: docker cmd running: ${run_testsuite}"
-            sudo docker exec ${container_id} ${run_testsuite}
+        - shell:
+            !include-raw: ./bottlenecks-run-suite.sh

 ####################
 # parameter macros
diff --git a/jjb/bottlenecks/bottlenecks-cleanup.sh b/jjb/bottlenecks/bottlenecks-cleanup.sh
new file mode 100644
index 000000000..0ba042318
--- /dev/null
+++ b/jjb/bottlenecks/bottlenecks-cleanup.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+set -e
+[[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+
+BOTTLENECKS_IMAGE=opnfv/bottlenecks
+echo "Bottlenecks: docker containers/images cleaning up"
+
+dangling_images=($(docker images -f "dangling=true" | grep $BOTTLENECKS_IMAGE | awk '{print $3}'))
+if [[ -n $dangling_images ]]; then
+    echo "Removing $BOTTLENECKS_IMAGE:<none> dangling images and their containers"
+    docker images | head -1 && docker images | grep $dangling_images
+    for image_id in "${dangling_images[@]}"; do
+        echo "Bottlenecks: Removing dangling image $image_id"
+        docker rmi -f $image_id >${redirect}
+    done
+fi
+
+for image_id in "${dangling_images[@]}"; do
+    if [[ -n $(docker ps -a | grep $image_id) ]]; then
+        echo "Bottlenecks: Removing containers associated with dangling image: $image_id"
+        docker ps -a | head -1 && docker ps -a | grep $image_id
+        docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect}
+    fi
+done
+
+if [[ -n $(docker ps -a | grep $BOTTLENECKS_IMAGE) ]]; then
+    echo "Removing existing $BOTTLENECKS_IMAGE containers"
+    docker ps -a | grep $BOTTLENECKS_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+if [[ -n $(docker images | grep $BOTTLENECKS_IMAGE) ]]; then
+    echo "Bottlenecks: docker images to remove:"
+    docker images | head -1 && docker images | grep $BOTTLENECKS_IMAGE
+    image_tags=($(docker images | grep $BOTTLENECKS_IMAGE | awk '{print $2}'))
+    for tag in "${image_tags[@]}"; do
+        echo "Removing docker image $BOTTLENECKS_IMAGE:$tag..."
+        docker rmi $BOTTLENECKS_IMAGE:$tag >$redirect
+    done
+fi
+
+echo "Yardstick: docker containers/images cleaning up"
+YARDSTICK_IMAGE=opnfv/yardstick
+
+dangling_images=($(docker images -f "dangling=true" | grep $YARDSTICK_IMAGE | awk '{print $3}'))
+if [[ -n $dangling_images ]]; then
+    echo "Removing $YARDSTICK_IMAGE:<none> dangling images and their containers"
+    docker images | head -1 && docker images | grep $dangling_images
+    for image_id in "${dangling_images[@]}"; do
+        echo "Yardstick: Removing dangling image $image_id"
+        docker rmi -f $image_id >${redirect}
+    done
+fi
+
+for image_id in "${dangling_images[@]}"; do
+    if [[ -n $(docker ps -a | grep $image_id) ]]; then
+        echo "Yardstick: Removing containers associated with dangling image: $image_id"
+        docker ps -a | head -1 && docker ps -a | grep $image_id
+        docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect}
+    fi
+done
+
+if [[ -n $(docker ps -a | grep $YARDSTICK_IMAGE) ]]; then
+    echo "Removing existing $YARDSTICK_IMAGE containers"
+    docker ps -a | grep $YARDSTICK_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+if [[ -n $(docker images | grep $YARDSTICK_IMAGE) ]]; then
+    echo "Yardstick: docker images to remove:"
+    docker images | head -1 && docker images | grep $YARDSTICK_IMAGE
+    image_tags=($(docker images | grep $YARDSTICK_IMAGE | awk '{print $2}'))
+    for tag in "${image_tags[@]}"; do
+        echo "Removing docker image $YARDSTICK_IMAGE:$tag..."
+        docker rmi $YARDSTICK_IMAGE:$tag >$redirect
+    done
+fi
+
+echo "InfluxDB: docker containers/images cleaning up"
+INFLUXDB_IMAGE=tutum/influxdb
+
+dangling_images=($(docker images -f "dangling=true" | grep $INFLUXDB_IMAGE | awk '{print $3}'))
+if [[ -n $dangling_images ]]; then
+    echo "Removing $INFLUXDB_IMAGE:<none> dangling images and their containers"
+    docker images | head -1 && docker images | grep $dangling_images
+    for image_id in "${dangling_images[@]}"; do
+        echo "InfluxDB: Removing dangling image $image_id"
+        docker rmi -f $image_id >${redirect}
+    done
+fi
+
+for image_id in "${dangling_images[@]}"; do
+    if [[ -n $(docker ps -a | grep $image_id) ]]; then
+        echo "InfluxDB: Removing containers associated with dangling image: $image_id"
+        docker ps -a | head -1 && docker ps -a | grep $image_id
+        docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect}
+    fi
+done
+
+if [[ -n $(docker ps -a | grep $INFLUXDB_IMAGE) ]]; then
+    echo "Removing existing $INFLUXDB_IMAGE containers"
+    docker ps -a | grep $INFLUXDB_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect
+fi
+
+if [[ -n $(docker images | grep $INFLUXDB_IMAGE) ]]; then
+    echo "InfluxDB: docker images to remove:"
+    docker images | head -1 && docker images | grep $INFLUXDB_IMAGE
+    image_tags=($(docker images | grep $INFLUXDB_IMAGE | awk '{print $2}'))
+    for tag in "${image_tags[@]}"; do
+        echo "Removing docker image $INFLUXDB_IMAGE:$tag..."
+        docker rmi $INFLUXDB_IMAGE:$tag >$redirect
+    done
+fi
\ No newline at end of file
diff --git a/jjb/bottlenecks/bottlenecks-project-jobs.yml b/jjb/bottlenecks/bottlenecks-project-jobs.yml
index 12ea31b13..a0abb9331 100644
--- a/jjb/bottlenecks/bottlenecks-project-jobs.yml
+++ b/jjb/bottlenecks/bottlenecks-project-jobs.yml
@@ -29,7 +29,8 @@
     suite:
         - 'rubbos'
         - 'vstf'
-        - 'posca'
+        - 'posca_stress_traffic'
+        - 'posca_stress_ping'

################################
# job templates
diff --git a/jjb/bottlenecks/bottlenecks-run-suite.sh b/jjb/bottlenecks/bottlenecks-run-suite.sh
new file mode 100644
index 000000000..f69463fc2
--- /dev/null
+++ b/jjb/bottlenecks/bottlenecks-run-suite.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#set -e
+[[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+BOTTLENECKS_IMAGE=opnfv/bottlenecks
+
+if [[ $SUITE_NAME == rubbos || $SUITE_NAME == vstf ]]; then
+    echo "Bottlenecks: to pull image $BOTTLENECKS_IMAGE:${DOCKER_TAG}"
+    docker pull $BOTTLENECKS_IMAGE:$DOCKER_TAG >${redirect}
+
+    echo "Bottlenecks: docker start running"
+    opts="--privileged=true -id"
+    envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
+          -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
+          -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
+          -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}"
+    cmd="sudo docker run ${opts} ${envs} $BOTTLENECKS_IMAGE:${DOCKER_TAG} /bin/bash"
+    echo "Bottlenecks: docker cmd running ${cmd}"
+    ${cmd} >${redirect}
+
+    echo "Bottlenecks: obtain docker id"
+    container_id=$(docker ps | grep "$BOTTLENECKS_IMAGE:${DOCKER_TAG}" | awk '{print $1}' | head -1)
+    if [ -z ${container_id} ]; then
+        echo "Cannot find $BOTTLENECKS_IMAGE container ID ${container_id}. Please check if it exists."
+        docker ps -a
+        exit 1
+    fi
+
+    echo "Bottlenecks: to prepare openstack environment"
+    prepare_env="${REPO_DIR}/ci/prepare_env.sh"
+    echo "Bottlenecks: docker cmd running: ${prepare_env}"
+    sudo docker exec ${container_id} ${prepare_env}
+
+    echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
+    run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
+    echo "Bottlenecks: docker cmd running: ${run_testsuite}"
+    sudo docker exec ${container_id} ${run_testsuite}
+else
+    echo "Bottlenecks: installing POSCA docker-compose"
+    if [ -f /usr/local/bin/docker-compose ]; then
+        rm -f /usr/local/bin/docker-compose
+    fi
+    curl -L https://github.com/docker/compose/releases/download/1.11.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
+    chmod +x /usr/local/bin/docker-compose
+
+    echo "Bottlenecks: composing up dockers"
+    cd $WORKSPACE
+    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml up -d
+
+    echo "Bottlenecks: running traffic stress/factor testing in posca testsuite"
+    POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
+    if [[ $SUITE_NAME == posca_stress_traffic ]]; then
+        TEST_CASE=posca_factor_system_bandwidth
+        echo "Bottlenecks: pulling tutum/influxdb for yardstick"
+        docker pull tutum/influxdb:0.13
+        sleep 5
+        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE
+    elif [[ $SUITE_NAME == posca_stress_ping ]]; then
+        TEST_CASE=posca_stress_ping
+        sleep 5
+        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE
+    fi
+
+    echo "Bottlenecks: cleaning up docker-compose images and dockers"
+    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml down --rmi all
+fi
\ No newline at end of file
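Editor's note, hedged as an observation: `docker images -f "dangling=true"` lists dangling images with repository and tag shown as <none>, so piping that listing through a grep on the image name may never match. A small sketch of an ID-based sweep that sidesteps the name grep (assumes the `ancestor` filter available in reasonably recent Docker):

#!/bin/bash
# Illustrative only; not the committed cleanup script.
redirect=${redirect:-/dev/null}

dangling_ids=($(docker images -f "dangling=true" -q))
for image_id in "${dangling_ids[@]}"; do
    # remove containers still referencing the dangling image first
    containers=$(docker ps -a -q --filter "ancestor=$image_id")
    [[ -n $containers ]] && docker rm -f $containers >${redirect}
    docker rmi -f $image_id >${redirect}
done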
diff --git a/jjb/copper/copper.yml b/jjb/copper/copper.yml
index ea1af473c..b65466e01 100644
--- a/jjb/copper/copper.yml
+++ b/jjb/copper/copper.yml
@@ -64,5 +64,4 @@
             set -o nounset
             set -o pipefail

-            cd $WORKSPACE/ci
             shellcheck -f tty tests/*.sh
diff --git a/jjb/daisy4nfv/daisy-daily-jobs.yml b/jjb/daisy4nfv/daisy-daily-jobs.yml
new file mode 100644
index 000000000..ffae70f8f
--- /dev/null
+++ b/jjb/daisy4nfv/daisy-daily-jobs.yml
@@ -0,0 +1,199 @@
+# jenkins job templates for Daisy
+# TODO
+# [ ] enable baremetal jobs after baremetal deployment finish
+# [ ] enable jobs in danube
+# [ ] add more scenarios
+# [ ] integration with yardstick
+
+- project:
+
+    name: 'daisy'
+    project: '{name}'
+    installer: '{name}'
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+    master: &master
+        stream: master
+        branch: '{stream}'
+        disabled: false
+        gs-pathname: ''
+#--------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#--------------------------------
+# CI PODs
+#--------------------------------
+    pod:
+        - baremetal:
+            slave-label: daisy-baremetal
+            <<: *master
+        - virtual:
+            slave-label: daisy-virtual
+            <<: *master
+#--------------------------------
+# Non-CI PODs
+#--------------------------------
+
+#--------------------------------
+# scenarios
+#--------------------------------
+    scenario:
+        # HA scenarios
+        - 'os-nosdn-nofeature-ha':
+            auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
+        # NOHA scenarios
+        - 'os-nosdn-nofeature-noha':
+            auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
+
+    jobs:
+        - '{project}-{scenario}-{pod}-daily-{stream}'
+        - '{project}-deploy-{pod}-daily-{stream}'
+
+########################
+# job templates
+########################
+- job-template:
+    name: '{project}-{scenario}-{pod}-daily-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 4
+            max-per-node: 1
+            option: 'project'
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'daisy.*-deploy-({pod})?-daily-.*'
+            block-level: 'NODE'
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+    triggers:
+        - '{auto-trigger-name}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - '{installer}-defaults'
+        - '{slave-label}-defaults':
+            installer: '{installer}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: '{scenario}'
+        - 'daisy-project-parameter':
+            gs-pathname: '{gs-pathname}'
+
+    builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
+        - trigger-builds:
+            - project: 'daisy-deploy-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO={scenario}
+              same-node: true
+              block: true
+        - trigger-builds:
+            - project: 'functest-daisy-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO={scenario}
+              same-node: true
+              block: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+
+- job-template:
+    name: '{project}-deploy-{pod}-daily-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 4
+            max-per-node: 1
+            option: 'project'
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'daisy.*-deploy-({pod})?-daily-.*'
+            block-level: 'NODE'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - '{installer}-defaults'
+        - '{slave-label}-defaults':
+            installer: '{installer}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: 'os-nosdn-nofeature-ha'
+        - 'daisy-project-parameter':
+            gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_TIMEOUT
+            default: '150'
+            description: 'Deployment timeout in minutes'
+
+    scm:
+        - git-scm
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
+    builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
+        - shell:
+            !include-raw-escape: ./daisy4nfv-download-artifact.sh
+        - shell:
+            !include-raw-escape: ./daisy-deploy.sh
+
+
+########################
+# trigger macros
+########################
+#-----------------------------------------------
+# Triggers for job running on daisy-baremetal against master branch
+#-----------------------------------------------
+# HA Scenarios
+- trigger:
+    name: 'daisy-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
+    triggers:
+        - timed: ''
+# NOHA Scenarios
+- trigger:
+    name: 'daisy-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
+    triggers:
+        - timed: ''
+#-----------------------------------------------
+# Triggers for job running on daisy-virtual against master branch
+#-----------------------------------------------
+- trigger:
+    name: 'daisy-os-nosdn-nofeature-ha-virtual-daily-master-trigger'
+    triggers:
+        - timed: ''
+# NOHA Scenarios
+- trigger:
+    name: 'daisy-os-nosdn-nofeature-noha-virtual-daily-master-trigger'
+    triggers:
+        - timed: 'H 8,22 * * *'
+
diff --git a/jjb/daisy4nfv/daisy-deploy.sh b/jjb/daisy4nfv/daisy-deploy.sh
new file mode 100755
index 000000000..b512e3f60
--- /dev/null
+++ b/jjb/daisy4nfv/daisy-deploy.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+set -o nounset
+set -o pipefail
+
+echo "--------------------------------------------------------"
+echo "This is $INSTALLER_TYPE deploy job!"
+echo "--------------------------------------------------------"
+
+DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-nofeature-ha"}
+BRIDGE=${BRIDGE:-pxebr}
+LAB_NAME=${NODE_NAME/-*}
+POD_NAME=${NODE_NAME/*-}
+deploy_ret=0
+
+if [[ ! "$NODE_NAME" =~ "-virtual" ]] && [[ ! "$LAB_NAME" =~ (zte) ]]; then
+    echo "Unsupported lab $LAB_NAME for now, Cannot continue!"
+    exit $deploy_ret
+fi
+
+# clone the securedlab repo
+cd $WORKSPACE
+BASE_DIR=$(cd ./;pwd)
+
+echo "Cloning securedlab repo $BRANCH"
+git clone ssh://jenkins-zte@gerrit.opnfv.org:29418/securedlab --quiet \
+    --branch $BRANCH
+
+# daisy ci/deploy/deploy.sh use $BASE_DIR/labs dir
+cp -r securedlab/labs .
+
+DEPLOY_COMMAND="sudo ./ci/deploy/deploy.sh -b $BASE_DIR \
+    -l $LAB_NAME -p $POD_NAME -B $BRIDGE"
+
+# log info to console
+echo """
+Deployment parameters
+--------------------------------------------------------
+Scenario: $DEPLOY_SCENARIO
+LAB: $LAB_NAME
+POD: $POD_NAME
+BRIDGE: $BRIDGE
+BASE_DIR: $BASE_DIR
+
+Starting the deployment using $INSTALLER_TYPE. This could take some time...
+--------------------------------------------------------
+Issuing command
+$DEPLOY_COMMAND
+"""
+
+# start the deployment
+$DEPLOY_COMMAND
+
+if [ $? -ne 0 ]; then
+    echo
+    echo "Deployment failed!"
+    deploy_ret=1
+else
+    echo
+    echo "--------------------------------------------------------"
+    echo "Deployment done!"
+fi
+
+exit $deploy_ret
diff --git a/jjb/daisy4nfv/daisy-project-jobs.yml b/jjb/daisy4nfv/daisy-project-jobs.yml
index 156740980..0127ed094 100644
--- a/jjb/daisy4nfv/daisy-project-jobs.yml
+++ b/jjb/daisy4nfv/daisy-project-jobs.yml
@@ -196,7 +196,7 @@
         - shell:
             !include-raw: ./daisy4nfv-download-artifact.sh
         - shell:
-            !include-raw: ./daisy4nfv-deploy.sh
+            !include-raw: ./daisy-deploy.sh

 - builder:
     name: 'daisy-test-daily-macro'
diff --git a/jjb/daisy4nfv/daisy4nfv-build.sh b/jjb/daisy4nfv/daisy4nfv-build.sh
index eb29fed72..375d80733 100755
--- a/jjb/daisy4nfv/daisy4nfv-build.sh
+++ b/jjb/daisy4nfv/daisy4nfv-build.sh
@@ -26,6 +26,7 @@ cd $WORKSPACE
     echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
     echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
     echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.bin"
+    echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $OUTPUT_DIR/opnfv-$OPNFV_ARTIFACT_VERSION.bin | cut -d' ' -f1)"
     echo "OPNFV_BUILD_URL=$BUILD_URL"
 ) > $WORKSPACE/opnfv.properties
diff --git a/jjb/daisy4nfv/daisy4nfv-deploy.sh b/jjb/daisy4nfv/daisy4nfv-deploy.sh
deleted file mode 100755
index cc2c10388..000000000
--- a/jjb/daisy4nfv/daisy4nfv-deploy.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-echo "Daisy deployment WIP"
diff --git a/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml b/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
index a6659b2bf..95d851cca 100644
--- a/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
+++ b/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
@@ -193,7 +193,7 @@
         - shell:
             !include-raw: ./daisy4nfv-download-artifact.sh
         - shell:
-            !include-raw: ./daisy4nfv-virtual-deploy.sh
+            !include-raw: ./daisy-deploy.sh
         - shell:
             !include-raw: ./daisy4nfv-workspace-cleanup.sh
diff --git a/jjb/daisy4nfv/daisy4nfv-virtual-deploy.sh b/jjb/daisy4nfv/daisy4nfv-virtual-deploy.sh
deleted file mode 100755
index ef4a07b8d..000000000
--- a/jjb/daisy4nfv/daisy4nfv-virtual-deploy.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-echo "--------------------------------------------------------"
-echo "This is diasy4nfv virtual deploy job!"
-echo "--------------------------------------------------------"
-
-cd $WORKSPACE
-
-if [[ "$NODE_NAME" =~ "-virtual" ]]; then
-    export NETWORK_CONF=./deploy/config/vm_environment/$NODE_NAME/network.yml
-    export DHA_CONF=./deploy/config/vm_environment/$NODE_NAME/deploy.yml
-else
-    # TODO: For the time being, we need to pass this script to let contributors merge their work.
-    echo "No support for non-virtual node"
-    exit 0
-fi
-
-sudo ./ci/deploy/deploy.sh -d ${DHA_CONF} -n ${NETWORK_CONF} -p ${NODE_NAME:-"zte-virtual1"}
-
-if [ $? -ne 0 ]; then
-    echo "depolyment failed!"
-    deploy_ret=1
-fi
-
-echo
-echo "--------------------------------------------------------"
-echo "Done!"
-
-exit $deploy_ret
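Editor's note: daisy-deploy.sh above splits the Jenkins slave name with bash pattern substitution, where a substitution with an empty replacement deletes the first (longest) match. A tiny runnable sketch of the two expansions, with an illustrative node name:

#!/bin/bash
NODE_NAME=zte-pod2
LAB_NAME=${NODE_NAME/-*}    # strips "-*" (first dash to end)   -> "zte"
POD_NAME=${NODE_NAME/*-}    # strips "*-" (start to last dash)  -> "pod2"
echo "$LAB_NAME $POD_NAME"  # prints: zte pod2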
diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index 2333fca14..28888d673 100644
--- a/jjb/doctor/doctor.yml
+++ b/jjb/doctor/doctor.yml
@@ -22,9 +22,9 @@
         - fuel:
             slave-label: 'ool-virtual2'
             pod: 'ool-virtual2'
-        - joid:
-            slave-label: 'ool-virtual3'
-            pod: 'ool-virtual3'
+        #- joid:
+        #    slave-label: 'ool-virtual3'
+        #    pod: 'ool-virtual3'

     inspector:
         - 'sample'
diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml
index f78c4a317..237855236 100644
--- a/jjb/fuel/fuel-daily-jobs.yml
+++ b/jjb/fuel/fuel-daily-jobs.yml
@@ -106,6 +106,8 @@
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         - 'os-nosdn-kvm_ovs_dpdk-noha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+        - 'os-nosdn-kvm_ovs_dpdk_bar-noha':
+            auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'

     jobs:
         - 'fuel-{scenario}-{pod}-daily-{stream}'
@@ -357,7 +359,11 @@
 - trigger:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '30 16 * * *'
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-master-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # Triggers for job running on fuel-baremetal against danube branch
 #-----------------------------------------------
@@ -447,6 +453,10 @@
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-danube-trigger'
     triggers:
        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-danube-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # Triggers for job running on fuel-virtual against master branch
 #-----------------------------------------------
@@ -534,7 +544,11 @@
 - trigger:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '30 16 * * *'
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-master-trigger'
+    triggers:
+        - timed: '30 20 * * *'
 #-----------------------------------------------
 # Triggers for job running on fuel-virtual against danube branch
 #-----------------------------------------------
@@ -623,6 +637,10 @@
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-danube-trigger'
     triggers:
        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-danube-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD1 Triggers running against master branch
 #-----------------------------------------------
@@ -711,6 +729,10 @@
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-master-trigger'
     triggers:
        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-master-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD2 Triggers running against master branch
 #-----------------------------------------------
@@ -800,6 +822,10 @@
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-master-trigger'
     triggers:
        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD3 Triggers running against master branch
 #-----------------------------------------------
@@ -888,6 +914,10 @@
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-master-trigger'
     triggers:
        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-master-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD1 Triggers running against danube branch
 #-----------------------------------------------
@@ -976,6 +1006,10 @@
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-danube-trigger'
     triggers:
        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-danube-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD2 Triggers running against danube branch
 #-----------------------------------------------
@@ -1065,6 +1099,10 @@
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-danube-trigger'
     triggers:
        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-danube-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD3 Triggers running against danube branch
 #-----------------------------------------------
@@ -1153,3 +1191,7 @@
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-danube-trigger'
     triggers:
        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-danube-trigger'
+    triggers:
+        - timed: ''
diff --git a/jjb/functest/functest-ci-jobs.yml b/jjb/functest/functest-ci-jobs.yml
index 49901bea2..e85144c92 100644
--- a/jjb/functest/functest-ci-jobs.yml
+++ b/jjb/functest/functest-ci-jobs.yml
@@ -113,6 +113,15 @@
             slave-label: armband-virtual
             installer: fuel
             <<: *danube
+# daisy CI PODs
+        - daisy-baremetal:
+            slave-label: daisy-baremetal
+            installer: daisy
+            <<: *master
+        - daisy-virtual:
+            slave-label: daisy-virtual
+            installer: daisy
+            <<: *master
 # netvirt 3rd party ci
         - virtual:
             slave-label: odl-netvirt-virtual
@@ -347,8 +356,6 @@
         - 'functest-cleanup'
         - 'set-functest-env'
         - 'functest-suite'
-        - 'functest-store-results'
-        - 'functest-exit'

 - builder:
     name: functest-daily
diff --git a/jjb/functest/functest-suite.sh b/jjb/functest/functest-suite.sh
index f28d3d037..228cc3da4 100755
--- a/jjb/functest/functest-suite.sh
+++ b/jjb/functest/functest-suite.sh
@@ -1,19 +1,18 @@
 #!/bin/bash
-set -e
-echo "Functest: run $FUNCTEST_SUITE_NAME on branch $BRANCH"
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
-    cmd="${FUNCTEST_REPO_DIR}/docker/run_tests.sh --test $FUNCTEST_SUITE_NAME"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
-    cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t $FUNCTEST_SUITE_NAME"
-else
-    cmd="functest testcase run $FUNCTEST_SUITE_NAME"
-fi
 container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
-docker exec $container_id $cmd
+if [ -z $container_id ]; then
+    echo "Functest container not found"
+    exit 1
+fi
+
+global_ret_val=0

-ret_value=$?
-ret_val_file="${HOME}/opnfv/functest/results/${BRANCH##*/}/return_value"
-echo ${ret_value}>${ret_val_file}
+tests=($(echo $FUNCTEST_SUITE_NAME | tr "," "\n"))
+for test in ${tests[@]}; do
+    cmd="python /home/opnfv/repos/functest/functest/ci/run_tests.py -t $test"
+    docker exec $container_id $cmd
+    let global_ret_val+=$?
+done

-exit 0
+exit $global_ret_val
diff --git a/jjb/functest/set-functest-env.sh b/jjb/functest/set-functest-env.sh
index abec480dc..05e3d5792 100755
--- a/jjb/functest/set-functest-env.sh
+++ b/jjb/functest/set-functest-env.sh
@@ -17,32 +17,34 @@
 if [[ ${RC_FILE_PATH} != '' ]] && [[ -f ${RC_FILE_PATH} ]] ; then
     echo "Credentials file detected: ${RC_FILE_PATH}"
     # volume if credentials file path is given to Functest
     rc_file_vol="-v ${RC_FILE_PATH}:/home/opnfv/functest/conf/openstack.creds"
+    RC_FLAG=1
 fi

 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
     ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-    if sudo virsh list | grep instack; then
-        instack_mac=$(sudo virsh domiflist instack | grep default | \
-                      grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
-    elif sudo virsh list | grep undercloud; then
-        instack_mac=$(sudo virsh domiflist undercloud | grep default | \
+    if sudo virsh list | grep undercloud; then
+        echo "Installer VM detected"
+        undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
                       grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+        INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
+        sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+        sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
+        stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
+
+        if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+            sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
+        fi
+        if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+            sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
+        fi
+    elif [[ "$RC_FLAG" == 1 ]]; then
+        echo "No available installer VM, but credentials provided...continuing"
     else
-        echo "No available installer VM exists...exiting"
+        echo "No available installer VM exists and no credentials provided...exiting"
         exit 1
     fi
-    INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
-    sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
-    sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
-    stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"

-    if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
-        sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-    fi
-    if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
-        sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-    fi
 fi
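Editor's note: the Apex branch of set-functest-env.sh resolves the installer IP in two hops, reading the undercloud VM's MAC on libvirt's default network and then looking that MAC up in the host ARP table. A condensed sketch of the chain, assuming a libvirt domain named undercloud as above:

#!/bin/bash
# Illustrative only; a tightened form of the regex used in the script.
undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
    grep -Eo "([0-9a-f]{2}:){5}[0-9a-f]{2}")
INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk '{print $1}')
echo "undercloud reachable at ${INSTALLER_IP}"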
diff --git a/jjb/global/releng-macros.yml b/jjb/global/releng-macros.yml
index 9b09e315f..c245ee813 100644
--- a/jjb/global/releng-macros.yml
+++ b/jjb/global/releng-macros.yml
@@ -61,7 +61,21 @@
             choosing-strategy: 'gerrit'
             refspec: '$GERRIT_REFSPEC'
             <<: *git-scm-defaults
-
+- scm:
+    name: git-scm-with-submodules
+    scm:
+        - git:
+            credentials-id: '$SSH_CREDENTIAL_ID'
+            url: '$GIT_BASE'
+            refspec: ''
+            branches:
+                - 'refs/heads/{branch}'
+            skip-tag: true
+            wipe-workspace: true
+            submodule:
+                recursive: true
+            timeout: 20
+            shallow-clone: true
 - trigger:
     name: 'daily-trigger-disabled'
     triggers:
@@ -72,7 +86,6 @@
     triggers:
         - timed: ''

-# NOTE: unused macro, but we may use this for some jobs.
 - trigger:
     name: gerrit-trigger-patchset-created
     triggers:
@@ -86,12 +99,22 @@
             - draft-published-event
             - comment-added-contains-event:
                 comment-contains-value: 'recheck'
+            - comment-added-contains-event:
+                comment-contains-value: 'reverify'
         projects:
             - project-compare-type: 'ANT'
              project-pattern: '{project}'
              branches:
                  - branch-compare-type: 'ANT'
                    branch-pattern: '**/{branch}'
+              file-paths:
+                  - compare-type: 'ANT'
+                    pattern: '{files}'
+        skip-vote:
+            successful: true
+            failed: true
+            unstable: true
+            notbuilt: true

 - trigger:
     name: gerrit-trigger-change-merged
@@ -426,7 +449,7 @@
     name: clean-workspace-log
     builders:
         - shell: |
-            find $WORKSPACE -type f -print -name '*.log' | xargs rm -f
+            find $WORKSPACE -type f -name '*.log' | xargs rm -f

 - publisher:
     name: archive-artifacts
@@ -436,3 +459,23 @@
         allow-empty: true
         fingerprint: true
         latest-only: true
+
+- publisher:
+    name: publish-coverage
+    publishers:
+        - cobertura:
+            report-file: "coverage.xml"
+            only-stable: "true"
+            health-auto-update: "true"
+            stability-auto-update: "true"
+            zoom-coverage-chart: "true"
+            targets:
+                - files:
+                    healthy: 10
+                    unhealthy: 20
+                    failing: 30
+                - method:
+                    healthy: 50
+                    unhealthy: 40
+                    failing: 30
+
diff --git a/jjb/global/slave-params.yml b/jjb/global/slave-params.yml
index 429828e8e..4b3eaaabf 100644
--- a/jjb/global/slave-params.yml
+++ b/jjb/global/slave-params.yml
@@ -382,6 +382,20 @@
         default: https://gerrit.opnfv.org/gerrit/$PROJECT
         description: 'Git URL to use on this Jenkins Slave'
 - parameter:
+    name: 'cengn-pod1-defaults'
+    parameters:
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - cengn-pod1
+            default-slaves:
+                - cengn-pod1
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+- parameter:
     name: 'intel-pod1-defaults'
     parameters:
         - node:
diff --git a/jjb/infra/bifrost-cleanup-job.yml b/jjb/infra/bifrost-cleanup-job.yml
new file mode 100644
index 000000000..ba283ffae
--- /dev/null
+++ b/jjb/infra/bifrost-cleanup-job.yml
@@ -0,0 +1,148 @@
+- project:
+    name: 'openstack-bifrost-cleanup'
+#--------------------------------
+# branches
+#--------------------------------
+    stream:
+        - master:
+            branch: '{stream}'
+
+#--------------------------------
+# projects
+#--------------------------------
+    project:
+        - 'openstack':
+            project-repo: 'https://git.openstack.org/openstack/bifrost'
+            clone-location: '/opt/bifrost'
+        - 'opnfv':
+            project-repo: 'https://gerrit.opnfv.org/gerrit/releng'
+            clone-location: '/opt/releng'
+
+#--------------------------------
+# jobs
+#--------------------------------
+    jobs:
+        - '{project}-bifrost-cleanup-{stream}'
+
+- job-template:
+    name: '{project}-bifrost-cleanup-{stream}'
+
+    concurrent: false
+
+    node: bifrost-verify-virtual
+
+    # Make sure no verify job is running on any of the slaves since that would
+    # produce build logs after we wipe the destination directory.
+    properties:
+        - build-blocker:
+            blocking-jobs:
+                - '{project}-bifrost-verify-*'
+
+    parameters:
+        - string:
+            name: PROJECT
+            default: '{project}'
+
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            set -eu
+
+            # DO NOT change this unless you know what you are doing.
+            BIFROST_GS_URL="gs://artifacts.opnfv.org/cross-community-ci/openstack/bifrost/$GERRIT_NAME/$GERRIT_CHANGE_NUMBER/"
+
+            # This should never happen... even 'recheck' uses the last jobs'
+            # gerrit information. Better exit with error so we can investigate
+            [[ ! -n $GERRIT_NAME ]] || [[ ! -n $GERRIT_CHANGE_NUMBER ]] && exit 1
+
+            echo "Removing build artifacts for $GERRIT_NAME/$GERRIT_CHANGE_NUMBER"
+
+            if ! [[ "$BIFROST_GS_URL" =~ "/cross-community-ci/openstack/bifrost/" ]]; then
+                echo "Oops! BIFROST_GS_URL=$BIFROST_GS_URL does not seem like a valid"
+                echo "bifrost location on the Google storage server. Please double-check"
+                echo "that it's set properly or fix this line if necessary."
+                echo "gsutil will not be executed until this is fixed!"
+                exit 1
+            fi
+            # No force (-f). We always verify upstream jobs so if there are no logs
+            # something else went wrong and we need to break immediately and investigate
+            gsutil rm -r $BIFROST_GS_URL
+
+    triggers:
+        - '{project}-gerrit-trigger-cleanup':
+            branch: '{branch}'
+
+    publishers:
+        - email:
+            recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn
+#--------------------------------
+# trigger macros
+#--------------------------------
+- trigger:
+    name: 'openstack-gerrit-trigger-cleanup'
+    triggers:
+        - gerrit:
+            server-name: 'review.openstack.org'
+            escape-quotes: true
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - patchset-uploaded-event: 'false'
+                # We only run this when the change is merged since
+                # we don't need the logs anymore
+                - change-merged-event: 'true'
+                - change-abandoned-event: 'true'
+                - change-restored-event: 'false'
+                - draft-published-event: 'false'
+            # This is an OPNFV maintenance job. We don't want to provide
+            # feedback on Gerrit
+            silent: true
+            silent-start: true
+            projects:
+                - project-compare-type: 'PLAIN'
+                  project-pattern: 'openstack/bifrost'
+                  branches:
+                      - branch-compare-type: 'ANT'
+                        branch-pattern: '**/{branch}'
+                  forbidden-file-paths:
+                      - compare-type: ANT
+                        pattern: 'doc/**'
+                      - compare-type: ANT
+                        pattern: 'releasenotes/**'
+            readable-message: true
+- trigger:
+    name: 'opnfv-gerrit-trigger-cleanup'
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - patchset-uploaded-event: 'false'
+                # We only run this when the change is merged since
+                # we don't need the logs anymore
+                - change-merged-event: 'true'
+                - change-abandoned-event: 'true'
+                - change-restored-event: 'false'
+                - draft-published-event: 'false'
+            # This is an OPNFV maintenance job. We don't want to provide
+            # feedback on Gerrit
+            silent: true
+            silent-start: true
+            projects:
+                - project-compare-type: 'ANT'
+                  project-pattern: 'releng'
+                  branches:
+                      - branch-compare-type: 'ANT'
+                        branch-pattern: '**/{branch}'
+                  file-paths:
+                      - compare-type: ANT
+                        pattern: 'prototypes/bifrost/**'
+                      - compare-type: ANT
+                        pattern: 'jjb/infra/**'
+            readable-message: true
diff --git a/jjb/infra/bifrost-verify-jobs.yml b/jjb/infra/bifrost-verify-jobs.yml
index c99023edf..d595d4bef 100644
--- a/jjb/infra/bifrost-verify-jobs.yml
+++ b/jjb/infra/bifrost-verify-jobs.yml
@@ -147,7 +147,7 @@
     publishers:
         - email:
-            recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn
+            recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com julienjut@gmail.com
#--------------------------------
# trigger macros
#--------------------------------
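Editor's note on the guard in the bifrost cleanup builder above: bash gives `||` and `&&` equal precedence with left-to-right grouping, so `[[ ! -n $A ]] || [[ ! -n $B ]] && exit 1` parses as `( empty(A) || empty(B) ) && exit 1`, i.e. exit when either variable is empty. A more explicit equivalent, offered only as a sketch:

#!/bin/bash
set -eu
# Spelling out the short-circuit guard with -z and a single [[ ]] test.
if [[ -z ${GERRIT_NAME:-} || -z ${GERRIT_CHANGE_NUMBER:-} ]]; then
    echo "Gerrit context missing; refusing to touch artifacts" >&2
    exit 1
fi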
'joid-os-onos-sfc-ha-baremetal-danube-trigger' @@ -323,6 +354,10 @@ name: 'joid-os-onos-sfc-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-onos-sfc-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-nosdn-lxd-noha trigger - branch: master - trigger: name: 'joid-os-nosdn-lxd-noha-baremetal-master-trigger' @@ -336,6 +371,10 @@ name: 'joid-os-nosdn-lxd-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-noha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-nosdn-lxd-noha trigger - branch: danube - trigger: name: 'joid-os-nosdn-lxd-noha-baremetal-danube-trigger' @@ -349,6 +388,10 @@ name: 'joid-os-nosdn-lxd-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-nosdn-lxd-ha trigger - branch: master - trigger: name: 'joid-os-nosdn-lxd-ha-baremetal-master-trigger' @@ -362,6 +405,10 @@ name: 'joid-os-nosdn-lxd-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-ha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-nosdn-lxd-ha trigger - branch: danube - trigger: name: 'joid-os-nosdn-lxd-ha-baremetal-danube-trigger' @@ -375,6 +422,10 @@ name: 'joid-os-nosdn-lxd-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-nosdn-nofeature-noha trigger - branch: master - trigger: name: 'joid-os-nosdn-nofeature-noha-baremetal-master-trigger' @@ -388,6 +439,10 @@ name: 'joid-os-nosdn-nofeature-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-master-trigger' + triggers: + - timed: '' # os-nosdn-nofeature-noha trigger - branch: danube - trigger: name: 'joid-os-nosdn-nofeature-noha-baremetal-danube-trigger' @@ -401,6 +456,10 @@ name: 'joid-os-nosdn-nofeature-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # k8-nosdn-nofeature-noha trigger - branch: master - trigger: name: 'joid-k8-nosdn-nofeature-noha-baremetal-master-trigger' @@ -414,6 +473,10 @@ name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-master-trigger' + triggers: + - timed: '' # k8-nosdn-nofeature-noha trigger - branch: danube - trigger: name: 'joid-k8-nosdn-nofeature-noha-baremetal-danube-trigger' @@ -427,6 +490,10 @@ name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # k8-nosdn-lb-noha trigger - branch: master - trigger: name: 'joid-k8-nosdn-lb-noha-baremetal-master-trigger' @@ -440,6 +507,10 @@ name: 'joid-k8-nosdn-lb-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-lb-noha-cengn-pod1-master-trigger' + triggers: + - timed: '' # k8-nosdn-lb-noha trigger - branch: danube - trigger: name: 'joid-k8-nosdn-lb-noha-baremetal-danube-trigger' @@ -453,3 +524,7 @@ name: 'joid-k8-nosdn-lb-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-lb-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' diff --git a/jjb/models/models.yml b/jjb/models/models.yml new file mode 100644 index 000000000..f419c8821 --- /dev/null +++ 
b/jjb/models/models.yml @@ -0,0 +1,67 @@ +################################################### +# All the jobs except verify have been removed! +# They will only be enabled on request by projects! +################################################### +- project: + name: models + + project: '{name}' + + jobs: + - 'models-verify-{stream}' + + stream: + - master: + branch: '{stream}' + gs-pathname: '' + disabled: false + - danube: + branch: 'stable/{stream}' + gs-pathname: '/{stream}' + disabled: false + +- job-template: + name: 'models-verify-{stream}' + + disabled: '{obj:disabled}' + + parameters: + - project-parameter: + project: '{project}' + branch: '{branch}' + - 'opnfv-build-ubuntu-defaults' + + scm: + - git-scm-gerrit + + triggers: + - gerrit: + server-name: 'gerrit.opnfv.org' + trigger-on: + - patchset-created-event: + exclude-drafts: 'false' + exclude-trivial-rebase: 'false' + exclude-no-code-change: 'false' + - draft-published-event + - comment-added-contains-event: + comment-contains-value: 'recheck' + - comment-added-contains-event: + comment-contains-value: 'reverify' + projects: + - project-compare-type: 'ANT' + project-pattern: '{project}' + branches: + - branch-compare-type: 'ANT' + branch-pattern: '**/{branch}' + forbidden-file-paths: + - compare-type: ANT + pattern: 'docs/**|.gitignore' + + builders: + - shell: | + #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + shellcheck -f tty tests/*.sh diff --git a/jjb/opnfvdocs/docs-post-rtd.sh b/jjb/opnfvdocs/docs-post-rtd.sh new file mode 100644 index 000000000..e3dc9b5f0 --- /dev/null +++ b/jjb/opnfvdocs/docs-post-rtd.sh @@ -0,0 +1,7 @@ +#!/bin/bash +if [ $GERRIT_BRANCH == "master" ]; then + RTD_BUILD_VERSION=latest +else + RTD_BUILD_VERSION=${{GERRIT_BRANCH/\//-}} +fi +curl -X POST --data "version_slug=$RTD_BUILD_VERSION" https://readthedocs.org/build/opnfvdocsdemo diff --git a/jjb/opnfvdocs/docs-rtd.yaml b/jjb/opnfvdocs/docs-rtd.yaml new file mode 100644 index 000000000..01b28204e --- /dev/null +++ b/jjb/opnfvdocs/docs-rtd.yaml @@ -0,0 +1,85 @@ +- project: + name: docs-rtd + jobs: + - 'docs-merge-rtd-{stream}' + - 'docs-verify-rtd-{stream}' + + stream: + - master: + branch: 'master' + + project: 'opnfvdocs' + rtdproject: 'opnfv' + # TODO: Archive Artifacts + +- job-template: + name: 'docs-merge-rtd-{stream}' + + project-type: freestyle + + parameters: + - label: + name: SLAVE_LABEL + default: 'lf-build1' + description: 'Slave label on Jenkins' + - project-parameter: + project: '{project}' + branch: '{branch}' + - string: + name: GIT_BASE + default: https://gerrit.opnfv.org/gerrit/releng + description: 'Git URL to use on this Jenkins Slave' + scm: + - git-scm + + triggers: + - gerrit-trigger-change-merged + + builders: + - shell: !include-raw: docs-post-rtd.sh + +- job-template: + name: 'docs-verify-rtd-{stream}' + + project-type: freestyle + + parameters: + - label: + name: SLAVE_LABEL + default: 'lf-build2' + description: 'Slave label on Jenkins' + - project-parameter: + project: '{project}' + branch: '{branch}' + - string: + name: GIT_BASE + default: https://gerrit.opnfv.org/gerrit/opnfvdocs + description: 'Git URL to use on this Jenkins Slave' + scm: + - git-scm-with-submodules: + branch: '{branch}' + + triggers: + - gerrit-trigger-patchset-created: + server: 'gerrit.opnfv.org' + project: '**' + branch: '{branch}' + files: 'docs/**/*.rst' + - timed: 'H H * * *' + + builders: + - shell: | + if [ "$GERRIT_PROJECT" != "opnfvdocs" ]; then + cd docs/submodules/$GERRIT_PROJECT + git fetch origin $GERRIT_REFSPEC && 
git checkout FETCH_HEAD + else + git fetch origin $GERRIT_REFSPEC && git checkout FETCH_HEAD + fi + - shell: | + sudo pip install virtualenv + virtualenv $WORKSPACE/venv + source $WORKSPACE/venv/bin/activate + pip install --upgrade pip + pip freeze + pip install tox + tox -edocs diff --git a/jjb/qtip/qtip-verify-jobs.yml b/jjb/qtip/qtip-verify-jobs.yml index d1fc34d11..5f0292b92 100644 --- a/jjb/qtip/qtip-verify-jobs.yml +++ b/jjb/qtip/qtip-verify-jobs.yml @@ -55,6 +55,8 @@ builders: - qtip-unit-tests-and-docs-build + publishers: + - publish-coverage ################################ ## job builders diff --git a/jjb/releng/opnfv-docker.sh b/jjb/releng/opnfv-docker.sh index c906e1fcd..ded743d7e 100644 --- a/jjb/releng/opnfv-docker.sh +++ b/jjb/releng/opnfv-docker.sh @@ -43,11 +43,11 @@ fi if [[ -n "$(docker images | grep $DOCKER_REPO_NAME)" ]]; then echo "Docker images to remove:" docker images | head -1 && docker images | grep $DOCKER_REPO_NAME - image_tags=($(docker images | grep $DOCKER_REPO_NAME | awk '{print $2}')) - for tag in "${image_tags[@]}"; do - if [[ -n "$(docker images|grep $DOCKER_REPO_NAME|grep $tag)" ]]; then - echo "Removing docker image $DOCKER_REPO_NAME:$tag..." - docker rmi -f $DOCKER_REPO_NAME:$tag + image_ids=($(docker images | grep $DOCKER_REPO_NAME | awk '{print $3}')) + for id in "${image_ids[@]}"; do + if [[ -n "$(docker images|grep $DOCKER_REPO_NAME|grep $id)" ]]; then + echo "Removing docker image $DOCKER_REPO_NAME:$id..." + docker rmi -f $id fi done fi @@ -77,8 +77,12 @@ fi echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG" echo "--------------------------------------------------------" echo -cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH - -f $DOCKERFILE ." +if [[ $DOCKER_REPO_NAME == *"dovetail"* ]]; then + cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG -f $DOCKERFILE ." +else + cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH + -f $DOCKERFILE ." +fi echo ${cmd} ${cmd} diff --git a/jjb/releng/testapi-backup-mongodb.sh b/jjb/releng/testapi-backup-mongodb.sh index 8dba17beb..795e479d9 100644 --- a/jjb/releng/testapi-backup-mongodb.sh +++ b/jjb/releng/testapi-backup-mongodb.sh @@ -27,5 +27,5 @@ if [ $? != 0 ]; then else echo "Uploading mongodump to artifact $artifact_dir" /usr/local/bin/gsutil cp -r "$workspace"/"$file_name" gs://artifacts.opnfv.org/"$artifact_dir"/ - echo "MongoDump can be found at http://artifacts.opnfv.org/$artifact_dir" + echo "MongoDump can be found at http://artifacts.opnfv.org/$artifact_dir.html" fi diff --git a/jjb/ves/ves.yml b/jjb/ves/ves.yml new file mode 100644 index 000000000..5f0da3320 --- /dev/null +++ b/jjb/ves/ves.yml @@ -0,0 +1,68 @@ +################################################### +# All the jobs except verify have been removed! +# They will only be enabled on request by projects! 
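For readers following the opnfv-docker.sh change above: the cleanup loop now collects image IDs (awk column $3) instead of tags (column $2), so dangling and multiply-tagged images are reclaimed as well. A minimal Python sketch of the same idea, assuming only that the docker CLI is on PATH (the helper name is illustrative, not part of the patch):

    import subprocess

    def remove_repo_images(repo_name):
        # Column 3 of `docker images` is the image ID; `docker rmi -f <id>`
        # removes the image together with every tag pointing at it, which
        # per-tag removal missed for retagged or multi-tagged images.
        output = subprocess.check_output(['docker', 'images']).decode()
        ids = [line.split()[2] for line in output.splitlines()[1:]
               if repo_name in line]
        for image_id in sorted(set(ids)):  # IDs repeat when tags share an image
            subprocess.call(['docker', 'rmi', '-f', image_id])

The set() is the point of the switch: several tags of one repo often share a single image ID, so de-duplicating by ID removes each image exactly once.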
+################################################### +- project: + name: ves + + project: '{name}' + + jobs: + - 'ves-verify-{stream}' + + stream: + - master: + branch: '{stream}' + gs-pathname: '' + disabled: false + - danube: + branch: 'stable/{stream}' + gs-pathname: '/{stream}' + disabled: false + +- job-template: + name: 'ves-verify-{stream}' + + disabled: '{obj:disabled}' + + parameters: + - project-parameter: + project: '{project}' + branch: '{branch}' + - 'opnfv-build-ubuntu-defaults' + + scm: + - git-scm-gerrit + + triggers: + - gerrit: + server-name: 'gerrit.opnfv.org' + trigger-on: + - patchset-created-event: + exclude-drafts: 'false' + exclude-trivial-rebase: 'false' + exclude-no-code-change: 'false' + - draft-published-event + - comment-added-contains-event: + comment-contains-value: 'recheck' + - comment-added-contains-event: + comment-contains-value: 'reverify' + projects: + - project-compare-type: 'ANT' + project-pattern: '{project}' + branches: + - branch-compare-type: 'ANT' + branch-pattern: '**/{branch}' + forbidden-file-paths: + - compare-type: ANT + pattern: 'docs/**|.gitignore' + + builders: + - shell: | + #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + shellcheck -f tty tests/*.sh + shellcheck -f tty utils/*.sh diff --git a/modules/opnfv/deployment/apex/adapter.py b/modules/opnfv/deployment/apex/adapter.py index cb827d886..225e17438 100644 --- a/modules/opnfv/deployment/apex/adapter.py +++ b/modules/opnfv/deployment/apex/adapter.py @@ -35,28 +35,34 @@ class ApexAdapter(manager.DeploymentHandler): return None for line in lines: - if 'controller' in line: - roles = "controller" - elif 'compute' in line: - roles = "compute" - else: + roles = [] + if any(x in line for x in ['-----', 'Networks']): continue - if 'Daylight' in line: - roles += ", OpenDaylight" + if 'controller' in line: + roles.append(manager.Role.CONTROLLER) + if 'compute' in line: + roles.append(manager.Role.COMPUTE) + if 'opendaylight' in line.lower(): + roles.append(manager.Role.ODL) + fields = line.split('|') id = re.sub('[!| ]', '', fields[1]).encode() name = re.sub('[!| ]', '', fields[2]).encode() - status_node = re.sub('[!| ]', '', fields[3]).encode() + status_node = re.sub('[!| ]', '', fields[3]).encode().lower() ip = re.sub('[!| ctlplane=]', '', fields[4]).encode() - if status_node.lower() == 'active': - status = manager.Node.STATUS_OK + ssh_client = None + if 'active' in status_node: + status = manager.NodeStatus.STATUS_OK ssh_client = ssh_utils.get_ssh_client(hostname=ip, username='heat-admin', pkey_file=self.pkey_file) + elif 'error' in status_node: + status = manager.NodeStatus.STATUS_ERROR + elif 'off' in status_node: + status = manager.NodeStatus.STATUS_OFFLINE else: - status = manager.Node.STATUS_INACTIVE - ssh_client = None + status = manager.NodeStatus.STATUS_INACTIVE node = manager.Node(id, ip, name, status, roles, ssh_client) nodes.append(node) @@ -73,8 +79,9 @@ class ApexAdapter(manager.DeploymentHandler): "grep Description|sed 's/^.*\: //'") cmd_ver = ("sudo yum info opendaylight 2>/dev/null|" "grep Version|sed 's/^.*\: //'") + description = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller(): description = node.run_cmd(cmd_descr) version = node.run_cmd(cmd_ver) break diff --git a/modules/opnfv/deployment/factory.py b/modules/opnfv/deployment/factory.py index e48a751ad..1ccee4e80 100644 --- a/modules/opnfv/deployment/factory.py +++ b/modules/opnfv/deployment/factory.py @@ -41,4 +41,5 @@ class Factory(object): 
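The apex adapter.py hunk above replaces the old comma-joined role string with a list of role constants and a finer-grained status mapping. A condensed, self-contained sketch of that row parsing, with plain strings standing in for the manager.Role/manager.NodeStatus constants introduced later in this diff (the keyword checks are folded into one lowercase pass for brevity):

    import re

    def parse_node_row(line):
        # Separator and header rows carry no node data, so skip them
        if any(x in line for x in ['-----', 'Networks']):
            return None
        # A node may now hold several roles at once
        roles = [r for r in ('controller', 'compute', 'opendaylight')
                 if r in line.lower()]
        fields = line.split('|')
        name = re.sub('[!| ]', '', fields[2])
        state = re.sub('[!| ]', '', fields[3]).lower()
        if 'active' in state:      # only active nodes get an SSH client
            status = 'active'
        elif 'error' in state:
            status = 'error'
        elif 'off' in state:
            status = 'offline'
        else:
            status = 'inactive'
        return {'name': name, 'status': status, 'roles': roles}

    # A row shaped like the node tables the adapter reads:
    row = '| 9a1b | overcloud-novacompute-0 | ACTIVE | ctlplane=192.0.2.8 |'
    print(parse_node_row(row))  # status 'active', roles ['compute']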
installer_user=installer_user, installer_pwd=installer_pwd) else: - raise Exception("Installer adapter is not implemented.") + raise Exception("Installer adapter is not implemented for " + "the given installer.") diff --git a/modules/opnfv/deployment/fuel/adapter.py b/modules/opnfv/deployment/fuel/adapter.py index 3e6ef50a0..a71d6cbf9 100644 --- a/modules/opnfv/deployment/fuel/adapter.py +++ b/modules/opnfv/deployment/fuel/adapter.py @@ -66,7 +66,7 @@ class FuelAdapter(manager.DeploymentHandler): if options and options['cluster'] and len(self.nodes) > 0: n = [] for node in self.nodes: - if node.info['cluster'] == options['cluster']: + if str(node.info['cluster']) == str(options['cluster']): n.append(node) return n @@ -114,7 +114,7 @@ class FuelAdapter(manager.DeploymentHandler): index_ip = i elif "mac" in fields[i]: index_mac = i - elif "roles " in fields[i]: + elif "roles " in fields[i] and "pending_roles" not in fields[i]: index_roles = i elif "online" in fields[i]: index_online = i @@ -124,26 +124,36 @@ class FuelAdapter(manager.DeploymentHandler): fields = lines[i].rsplit(' | ') id = fields[index_id].strip().encode() ip = fields[index_ip].strip().encode() - status_node = fields[index_status].strip().encode() + status_node = fields[index_status].strip().encode().lower() name = fields[index_name].strip().encode() - roles = fields[index_roles].strip().encode() + roles_all = fields[index_roles].strip().encode().lower() + + roles = [x for x in [manager.Role.CONTROLLER, + manager.Role.COMPUTE, + manager.Role.ODL] if x in roles_all] dict = {"cluster": fields[index_cluster].strip().encode(), "mac": fields[index_mac].strip().encode(), "status_node": status_node, "online": fields[index_online].strip().encode()} + ssh_client = None if status_node == 'ready': - status = manager.Node.STATUS_OK + status = manager.NodeStatus.STATUS_OK proxy = {'ip': self.installer_ip, 'username': self.installer_user, 'password': self.installer_pwd} ssh_client = ssh_utils.get_ssh_client(hostname=ip, username='root', proxy=proxy) + elif 'error' in status_node: + status = manager.NodeStatus.STATUS_ERROR + elif 'off' in status_node: + status = manager.NodeStatus.STATUS_OFFLINE + elif 'discover' in status_node: + status = manager.NodeStatus.STATUS_UNUSED else: - status = manager.Node.STATUS_INACTIVE - ssh_client = None + status = manager.NodeStatus.STATUS_INACTIVE node = manager.Node( id, ip, name, status, roles, ssh_client, dict) @@ -160,26 +170,30 @@ class FuelAdapter(manager.DeploymentHandler): cmd = 'source openrc;nova-manage version 2>/dev/null' version = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller() and node.is_active(): version = node.run_cmd(cmd) break return version def get_sdn_version(self): - cmd = "apt-cache show opendaylight|grep Version|sed 's/^.*\: //'" + cmd = "apt-cache show opendaylight|grep Version" version = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if manager.Role.ODL in node.roles and node.is_active(): odl_version = node.run_cmd(cmd) if odl_version: - version = 'OpenDaylight ' + odl_version - break + version = 'OpenDaylight ' + odl_version.split(' ')[-1] + break return version def get_deployment_status(self): - cmd = 'fuel env|grep operational' + cmd = "fuel env|tail -1|awk '{print $3}'" result = self.installer_node.run_cmd(cmd) if result is None or len(result) == 0: - return 'failed' + return 'unknown' + elif 'operational' in result: + return 'active' + elif 'deploy' in result: + return 'deploying' 
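Two easy-to-miss fixes sit in the fuel adapter above: cluster ids are compared as strings (the CLI mixes ints and strings across fields, which silently dropped nodes from cluster filtering), and the header scan rejects 'pending_roles' so it can no longer shadow the real 'roles' column. The new node-state mapping, extracted here as a sketch with plain strings in place of the NodeStatus constants:

    def fuel_node_status(status_node):
        # The Fuel CLI reports 'ready' for deployed nodes and 'discover'
        # for unprovisioned ones; the latter now map to 'unused' rather
        # than being lumped in with 'inactive'.
        status_node = status_node.strip().lower()
        if status_node == 'ready':
            return 'active'          # the only state that gets an SSH client
        if 'error' in status_node:
            return 'error'
        if 'off' in status_node:
            return 'offline'
        if 'discover' in status_node:
            return 'unused'
        return 'inactive'

    assert fuel_node_status('ready') == 'active'
    assert fuel_node_status('discover') == 'unused'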
else: return 'active' diff --git a/modules/opnfv/deployment/manager.py b/modules/opnfv/deployment/manager.py index 8c9599b6e..df735f157 100644 --- a/modules/opnfv/deployment/manager.py +++ b/modules/opnfv/deployment/manager.py @@ -27,7 +27,7 @@ class Deployment(object): status, openstack_version, sdn_controller, - nodes=[]): + nodes=None): self.deployment_info = { 'installer': installer, @@ -89,26 +89,37 @@ class Deployment(object): sdn_controller=self.deployment_info['sdn_controller']) for node in self.deployment_info['nodes']: - s += '\t\t{node_object}\n'.format(node_object=node) + s += '{node_object}\n'.format(node_object=node) return s -class Node(object): +class Role(): + INSTALLER = 'installer' + CONTROLLER = 'controller' + COMPUTE = 'compute' + ODL = 'opendaylight' + ONOS = 'onos' + +class NodeStatus(): STATUS_OK = 'active' STATUS_INACTIVE = 'inactive' STATUS_OFFLINE = 'offline' - STATUS_FAILED = 'failed' + STATUS_ERROR = 'error' + STATUS_UNUSED = 'unused' + + +class Node(object): def __init__(self, id, ip, name, status, - roles, - ssh_client, - info={}): + roles=None, + ssh_client=None, + info=None): self.id = id self.ip = ip self.name = name @@ -117,11 +128,21 @@ class Node(object): self.roles = roles self.info = info + self.cpu_info = 'unknown' + self.memory = 'unknown' + self.ovs = 'unknown' + + if ssh_client and Role.INSTALLER not in self.roles: + sys_info = self.get_system_info() + self.cpu_info = sys_info['cpu_info'] + self.memory = sys_info['memory'] + self.ovs = self.get_ovs_info() + def get_file(self, src, dest): ''' SCP file from a node ''' - if self.status is not Node.STATUS_OK: + if self.status is not NodeStatus.STATUS_OK: logger.info("The node %s is not active" % self.ip) return 1 logger.info("Fetching %s from %s" % (src, self.ip)) @@ -137,7 +158,7 @@ class Node(object): ''' SCP file to a node ''' - if self.status is not Node.STATUS_OK: + if self.status is not NodeStatus.STATUS_OK: logger.info("The node %s is not active" % self.ip) return 1 logger.info("Copying %s to %s" % (src, self.ip)) @@ -153,14 +174,16 @@ class Node(object): ''' Run command remotely on a node ''' - if self.status is not Node.STATUS_OK: - logger.info("The node %s is not active" % self.ip) - return 1 + if self.status is not NodeStatus.STATUS_OK: + logger.error( + "Error running command %s. 
The node %s is not active" + % (cmd, self.ip)) + return None _, stdout, stderr = (self.ssh_client.exec_command(cmd)) error = stderr.readlines() if len(error) > 0: logger.error("error %s" % ''.join(error)) - return error + return None output = ''.join(stdout.readlines()).rstrip() return output @@ -174,33 +197,91 @@ class Node(object): 'name': self.name, 'status': self.status, 'roles': self.roles, + 'cpu_info': self.cpu_info, + 'memory': self.memory, + 'ovs': self.ovs, 'info': self.info } - def get_attribute(self, attribute): + def is_active(self): ''' - Returns an attribute given the name + Returns if the node is active ''' - return self.get_dict()[attribute] + if self.status == NodeStatus.STATUS_OK: + return True + return False def is_controller(self): ''' Returns if the node is a controller ''' - if 'controller' in self.get_attribute('roles'): - return True - return False + return Role.CONTROLLER in self.roles def is_compute(self): ''' Returns if the node is a compute ''' - if 'compute' in self.get_attribute('roles'): - return True - return False + return Role.COMPUTE in self.roles + + def is_odl(self): + ''' + Returns if the node is an opendaylight + ''' + return Role.ODL in self.roles + + def get_ovs_info(self): + ''' + Returns the ovs version installed + ''' + if self.is_active(): + cmd = "ovs-vsctl --version|head -1| sed 's/^.*) //'" + return self.run_cmd(cmd) + return None + + def get_system_info(self): + ''' + Returns the ovs version installed + ''' + cmd = 'grep MemTotal /proc/meminfo' + memory = self.run_cmd(cmd).partition('MemTotal:')[-1].strip().encode() + + cpu_info = {} + cmd = 'lscpu' + result = self.run_cmd(cmd) + for line in result.splitlines(): + if line.startswith('CPU(s)'): + cpu_info['num_cpus'] = line.split(' ')[-1].encode() + elif line.startswith('Thread(s) per core'): + cpu_info['threads/core'] = line.split(' ')[-1].encode() + elif line.startswith('Core(s) per socket'): + cpu_info['cores/socket'] = line.split(' ')[-1].encode() + elif line.startswith('Model name'): + cpu_info['model'] = line.partition( + 'Model name:')[-1].strip().encode() + elif line.startswith('Architecture'): + cpu_info['arch'] = line.split(' ')[-1].encode() + + return {'memory': memory, 'cpu_info': cpu_info} def __str__(self): - return str(self.get_dict()) + return ''' + name: {name} + id: {id} + ip: {ip} + status: {status} + roles: {roles} + cpu: {cpu_info} + memory: {memory} + ovs: {ovs} + info: {info}'''.format(name=self.name, + id=self.id, + ip=self.ip, + status=self.status, + roles=self.roles, + cpu_info=self.cpu_info, + memory=self.memory, + ovs=self.ovs, + info=self.info) class DeploymentHandler(object): @@ -236,9 +317,9 @@ class DeploymentHandler(object): self.installer_node = Node(id='', ip=installer_ip, name=installer, - status='active', + status=NodeStatus.STATUS_OK, ssh_client=self.installer_connection, - roles='installer node') + roles=Role.INSTALLER) else: raise Exception( 'Cannot establish connection to the installer node!') @@ -279,6 +360,18 @@ class DeploymentHandler(object): ''' return self.installer_node + def get_arch(self): + ''' + Returns the architecture of the first compute node found + ''' + arch = None + for node in self.nodes: + if node.is_compute(): + arch = node.cpu_info.get('arch', None) + if arch: + break + return arch + def get_deployment_info(self): ''' Returns an object of type Deployment diff --git a/modules/opnfv/utils/ovs_logger.py b/modules/opnfv/utils/ovs_logger.py index 75b4cec80..7777a9a16 100644 --- a/modules/opnfv/utils/ovs_logger.py +++ 
b/modules/opnfv/utils/ovs_logger.py @@ -7,7 +7,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -import opnfv.utils.OPNFVLogger as OPNFVLogger +import opnfv.utils.opnfv_logger as OPNFVLogger import os import time import shutil @@ -101,19 +101,13 @@ class OVSLogger(object): if timestamp is None: timestamp = time.strftime("%Y%m%d-%H%M%S") - for controller_client in controller_clients: - self.ofctl_dump_flows(controller_client, - timestamp=timestamp) - self.vsctl_show(controller_client, - timestamp=timestamp) - - for compute_client in compute_clients: - self.ofctl_dump_flows(compute_client, - timestamp=timestamp) - self.vsctl_show(compute_client, - timestamp=timestamp) + clients = controller_clients + compute_clients + for client in clients: + self.ofctl_dump_flows(client, timestamp=timestamp) + self.vsctl_show(client, timestamp=timestamp) if related_error is not None: dumpdir = os.path.join(self.ovs_dir, timestamp) + self.__mkdir_p(dumpdir) with open(os.path.join(dumpdir, 'error'), 'w') as f: f.write(related_error) diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/test-bifrost-deployment.sh index 914a906f4..3e2381fea 100755 --- a/prototypes/bifrost/scripts/test-bifrost-deployment.sh +++ b/prototypes/bifrost/scripts/test-bifrost-deployment.sh @@ -79,6 +79,11 @@ source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup ANSIBLE=$(which ansible-playbook) set -x -o nounset +logs_on_exit() { + $SCRIPT_HOME/collect-test-info.sh +} +trap logs_on_exit EXIT + # Change working directory cd $BIFROST_HOME/playbooks @@ -129,6 +134,4 @@ if [ $EXITCODE != 0 ]; then echo "****************************" fi -$SCRIPT_HOME/collect-test-info.sh - exit $EXITCODE diff --git a/utils/push-test-logs.sh b/utils/push-test-logs.sh index 5e428d07b..9099657c8 100644 --- a/utils/push-test-logs.sh +++ b/utils/push-test-logs.sh @@ -25,7 +25,7 @@ node_list=(\ 'intel-pod5' 'intel-pod6' 'intel-pod7' 'intel-pod8' \ 'ericsson-pod1' 'ericsson-pod2' \ 'ericsson-virtual1' 'ericsson-virtual2' 'ericsson-virtual3' \ -'ericsson-virtual4' 'ericsson-virtual5' \ +'ericsson-virtual4' 'ericsson-virtual5' 'ericsson-virtual12' \ 'arm-pod1' 'arm-pod3' \ 'huawei-pod1' 'huawei-pod2' 'huawei-pod3' 'huawei-pod4' 'huawei-pod5' \ 'huawei-pod6' 'huawei-pod7' 'huawei-pod12' \ diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py index 158ee597b..df5632335 100755 --- a/utils/test/reporting/functest/reporting-status.py +++ b/utils/test/reporting/functest/reporting-status.py @@ -61,13 +61,13 @@ logger.info("*******************************************") # Retrieve test cases of Tier 1 (smoke) config_tiers = functest_yaml_config.get("tiers") -# we consider Tier 1 (smoke),2 (features) +# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features) # to validate scenarios -# Tier > 4 are not used to validate scenarios but we display the results anyway +# Tier > 2 are not used to validate scenarios but we display the results anyway # tricky thing for the API as some tests are Functest tests # other tests are declared directly in the feature projects for tier in config_tiers: - if tier['order'] > 0 and tier['order'] < 2: + if tier['order'] >= 0 and tier['order'] < 2: for case in tier['testcases']: if case['name'] not in blacklist: testValid.append(tc.TestCase(case['name'], diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py index 
df0874e0b..e40aa7f00 100644 --- a/utils/test/reporting/functest/testCase.py +++ b/utils/test/reporting/functest/testCase.py @@ -43,7 +43,10 @@ class TestCase(object): 'parser': 'Parser', 'connection_check': 'Health (connection)', 'api_check': 'Health (api)', - 'snaps_smoke': 'SNAPS'} + 'snaps_smoke': 'SNAPS', + 'snaps_health_check': 'Health (dhcp)', + 'gluon_vping': 'Netready', + 'barometercollectd': 'Barometer'} try: self.displayName = display_name_matrix[self.name] except: @@ -138,8 +141,10 @@ class TestCase(object): 'parser': 'parser-basics', 'connection_check': 'connection_check', 'api_check': 'api_check', - 'snaps_smoke': 'snaps_smoke' - } + 'snaps_smoke': 'snaps_smoke', + 'snaps_health_check': 'snaps_health_check', + 'gluon_vping': 'gluon_vping', + 'barometercollectd': 'barometercollectd'} try: return test_match_matrix[self.name] except: diff --git a/utils/test/reporting/reporting.yaml b/utils/test/reporting/reporting.yaml index 9db0890b2..2fb6b7831 100644 --- a/utils/test/reporting/reporting.yaml +++ b/utils/test/reporting/reporting.yaml @@ -36,12 +36,20 @@ functest: - ovno - security_scan - rally_sanity + - healthcheck + - odl_netvirt + - aaa + - cloudify_ims + - orchestra_ims + - juju_epc + - orchestra + - promise max_scenario_criteria: 50 test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml log_level: ERROR jenkins_url: https://build.opnfv.org/ci/view/functest/job/ exclude_noha: False - exclude_virtual: True + exclude_virtual: False yardstick: test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py index fc5d188af..1879fb628 100644 --- a/utils/test/reporting/utils/reporting_utils.py +++ b/utils/test/reporting/utils/reporting_utils.py @@ -269,7 +269,8 @@ def getJenkinsUrl(build_tag): url_base = get_config('functest.jenkins_url') try: build_id = [int(s) for s in build_tag.split("-") if s.isdigit()] - url_id = build_tag[8:-(len(build_id) + 3)] + "/" + str(build_id[0]) + url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] + + "/" + str(build_id[0])) jenkins_url = url_base + url_id + "/console" except: print('Impossible to get jenkins url:') diff --git a/utils/test/testapi/etc/config.ini b/utils/test/testapi/etc/config.ini index 0edb73a3f..77cc6c6ee 100644 --- a/utils/test/testapi/etc/config.ini +++ b/utils/test/testapi/etc/config.ini @@ -11,6 +11,7 @@ dbname = test_results_collection port = 8000 # With debug_on set to true, error traces will be shown in HTTP responses debug = True +authenticate = False [swagger] base_url = http://localhost:8000 diff --git a/utils/test/testapi/opnfv_testapi/cmd/server.py b/utils/test/testapi/opnfv_testapi/cmd/server.py index c3d734607..013ee6642 100644 --- a/utils/test/testapi/opnfv_testapi/cmd/server.py +++ b/utils/test/testapi/opnfv_testapi/cmd/server.py @@ -31,19 +31,19 @@ TODOs : import argparse -import tornado.ioloop import motor +import tornado.ioloop -from opnfv_testapi.common.config import APIConfig -from opnfv_testapi.tornado_swagger import swagger +from opnfv_testapi.common import config from opnfv_testapi.router import url_mappings +from opnfv_testapi.tornado_swagger import swagger # optionally get config file from command line parser = argparse.ArgumentParser() parser.add_argument("-c", "--config-file", dest='config_file', help="Config file location") args = parser.parse_args() -CONF = APIConfig().parse(args.config_file) +CONF = config.APIConfig().parse(args.config_file) # 
connecting to MongoDB server, and choosing database client = motor.MotorClient(CONF.mongo_url) @@ -57,6 +57,7 @@ def make_app(): url_mappings.mappings, db=db, debug=CONF.api_debug_on, + auth=CONF.api_authenticate_on ) diff --git a/utils/test/testapi/opnfv_testapi/common/config.py b/utils/test/testapi/opnfv_testapi/common/config.py index ecab88ae3..84a127391 100644 --- a/utils/test/testapi/opnfv_testapi/common/config.py +++ b/utils/test/testapi/opnfv_testapi/common/config.py @@ -7,9 +7,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 # feng.xiaowei@zte.com.cn remove prepare_put_request 5-30-2016 ############################################################################## - - -from ConfigParser import SafeConfigParser, NoOptionError +import ConfigParser class ParseError(Exception): @@ -36,13 +34,14 @@ class APIConfig: self.mongo_dbname = None self.api_port = None self.api_debug_on = None + self.api_authenticate_on = None self._parser = None self.swagger_base_url = None def _get_parameter(self, section, param): try: return self._parser.get(section, param) - except NoOptionError: + except ConfigParser.NoOptionError: raise ParseError("[%s.%s] parameter not found" % (section, param)) def _get_int_parameter(self, section, param): @@ -68,7 +67,7 @@ class APIConfig: if config_location is None: config_location = obj._default_config_location - obj._parser = SafeConfigParser() + obj._parser = ConfigParser.SafeConfigParser() obj._parser.read(config_location) if not obj._parser: raise ParseError("%s not found" % config_location) @@ -79,6 +78,9 @@ class APIConfig: obj.api_port = obj._get_int_parameter("api", "port") obj.api_debug_on = obj._get_bool_parameter("api", "debug") + obj.api_authenticate_on = obj._get_bool_parameter("api", + "authenticate") + obj.swagger_base_url = obj._get_parameter("swagger", "base_url") return obj @@ -92,4 +94,5 @@ class APIConfig: self.mongo_dbname, self.api_port, self.api_debug_on, + self.api_authenticate_on, self.swagger_base_url) diff --git a/utils/test/testapi/opnfv_testapi/common/constants.py b/utils/test/testapi/opnfv_testapi/common/constants.py index 4d39a142d..71bd95216 100644 --- a/utils/test/testapi/opnfv_testapi/common/constants.py +++ b/utils/test/testapi/opnfv_testapi/common/constants.py @@ -10,6 +10,7 @@ DEFAULT_REPRESENTATION = "application/json" HTTP_BAD_REQUEST = 400 +HTTP_UNAUTHORIZED = 401 HTTP_FORBIDDEN = 403 HTTP_NOT_FOUND = 404 HTTP_OK = 200 diff --git a/utils/test/testapi/opnfv_testapi/resources/handlers.py b/utils/test/testapi/opnfv_testapi/resources/handlers.py index a2628e249..8255b526a 100644 --- a/utils/test/testapi/opnfv_testapi/resources/handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/handlers.py @@ -20,19 +20,19 @@ # feng.xiaowei@zte.com.cn remove DashboardHandler 5-30-2016 ############################################################################## -import json from datetime import datetime +import functools +import json from tornado import gen -from tornado.web import RequestHandler, asynchronous, HTTPError +from tornado import web -from models import CreateResponse -from opnfv_testapi.common.constants import DEFAULT_REPRESENTATION, \ - HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_FORBIDDEN +import models +from opnfv_testapi.common import constants from opnfv_testapi.tornado_swagger import swagger -class GenericApiHandler(RequestHandler): +class GenericApiHandler(web.RequestHandler): def __init__(self, application, request, **kwargs): super(GenericApiHandler, self).__init__(application, request, **kwargs) self.db = 
self.settings["db"] @@ -44,49 +44,71 @@ class GenericApiHandler(RequestHandler): self.db_testcases = 'testcases' self.db_results = 'results' self.db_scenarios = 'scenarios' + self.auth = self.settings["auth"] def prepare(self): if self.request.method != "GET" and self.request.method != "DELETE": if self.request.headers.get("Content-Type") is not None: if self.request.headers["Content-Type"].startswith( - DEFAULT_REPRESENTATION): + constants.DEFAULT_REPRESENTATION): try: self.json_args = json.loads(self.request.body) except (ValueError, KeyError, TypeError) as error: - raise HTTPError(HTTP_BAD_REQUEST, - "Bad Json format [{}]". - format(error)) + raise web.HTTPError(constants.HTTP_BAD_REQUEST, + "Bad Json format [{}]". + format(error)) def finish_request(self, json_object=None): if json_object: self.write(json.dumps(json_object)) - self.set_header("Content-Type", DEFAULT_REPRESENTATION) + self.set_header("Content-Type", constants.DEFAULT_REPRESENTATION) self.finish() def _create_response(self, resource): href = self.request.full_url() + '/' + str(resource) - return CreateResponse(href=href).format() + return models.CreateResponse(href=href).format() def format_data(self, data): cls_data = self.table_cls.from_dict(data) return cls_data.format_http() - @asynchronous + def authenticate(method): + @web.asynchronous + @gen.coroutine + @functools.wraps(method) + def wrapper(self, *args, **kwargs): + if self.auth: + try: + token = self.request.headers['X-Auth-Token'] + except KeyError: + raise web.HTTPError(constants.HTTP_UNAUTHORIZED, + "No Authentication Header.") + query = {'access_token': token} + check = yield self._eval_db_find_one(query, 'tokens') + if not check: + raise web.HTTPError(constants.HTTP_FORBIDDEN, + "Invalid Token.") + ret = yield gen.coroutine(method)(self, *args, **kwargs) + raise gen.Return(ret) + return wrapper + + @web.asynchronous @gen.coroutine + @authenticate def _create(self, miss_checks, db_checks, **kwargs): """ :param miss_checks: [miss1, miss2] :param db_checks: [(table, exist, query, error)] """ if self.json_args is None: - raise HTTPError(HTTP_BAD_REQUEST, "no body") + raise web.HTTPError(constants.HTTP_BAD_REQUEST, "no body") data = self.table_cls.from_dict(self.json_args) for miss in miss_checks: miss_data = data.__getattribute__(miss) if miss_data is None or miss_data == '': - raise HTTPError(HTTP_BAD_REQUEST, - '{} missing'.format(miss)) + raise web.HTTPError(constants.HTTP_BAD_REQUEST, + '{} missing'.format(miss)) for k, v in kwargs.iteritems(): data.__setattr__(k, v) @@ -95,7 +117,7 @@ class GenericApiHandler(RequestHandler): check = yield self._eval_db_find_one(query(data), table) if (exist and not check) or (not exist and check): code, message = error(data) - raise HTTPError(code, message) + raise web.HTTPError(code, message) if self.table != 'results': data.creation_date = datetime.now() @@ -107,7 +129,7 @@ class GenericApiHandler(RequestHandler): resource = _id self.finish_request(self._create_response(resource)) - @asynchronous + @web.asynchronous @gen.coroutine def _list(self, query=None, res_op=None, *args, **kwargs): if query is None: @@ -126,40 +148,42 @@ class GenericApiHandler(RequestHandler): res = res_op(data, *args) self.finish_request(res) - @asynchronous + @web.asynchronous @gen.coroutine def _get_one(self, query): data = yield self._eval_db_find_one(query) if data is None: - raise HTTPError(HTTP_NOT_FOUND, - "[{}] not exist in table [{}]" - .format(query, self.table)) + raise web.HTTPError(constants.HTTP_NOT_FOUND, + "[{}] not exist in 
table [{}]" + .format(query, self.table)) self.finish_request(self.format_data(data)) - @asynchronous + @web.asynchronous @gen.coroutine + @authenticate def _delete(self, query): data = yield self._eval_db_find_one(query) if data is None: - raise HTTPError(HTTP_NOT_FOUND, - "[{}] not exit in table [{}]" - .format(query, self.table)) + raise web.HTTPError(constants.HTTP_NOT_FOUND, + "[{}] not exit in table [{}]" + .format(query, self.table)) yield self._eval_db(self.table, 'remove', query) self.finish_request() - @asynchronous + @web.asynchronous @gen.coroutine + @authenticate def _update(self, query, db_keys): if self.json_args is None: - raise HTTPError(HTTP_BAD_REQUEST, "No payload") + raise web.HTTPError(constants.HTTP_BAD_REQUEST, "No payload") # check old data exist from_data = yield self._eval_db_find_one(query) if from_data is None: - raise HTTPError(HTTP_NOT_FOUND, - "{} could not be found in table [{}]" - .format(query, self.table)) + raise web.HTTPError(constants.HTTP_NOT_FOUND, + "{} could not be found in table [{}]" + .format(query, self.table)) data = self.table_cls.from_dict(from_data) # check new data exist @@ -167,9 +191,9 @@ class GenericApiHandler(RequestHandler): if not equal: to_data = yield self._eval_db_find_one(new_query) if to_data is not None: - raise HTTPError(HTTP_FORBIDDEN, - "{} already exists in table [{}]" - .format(new_query, self.table)) + raise web.HTTPError(constants.HTTP_FORBIDDEN, + "{} already exists in table [{}]" + .format(new_query, self.table)) # we merge the whole document """ edit_request = self._update_requests(data) @@ -186,7 +210,7 @@ class GenericApiHandler(RequestHandler): request = self._update_request(request, k, v, data.__getattribute__(k)) if not request: - raise HTTPError(HTTP_FORBIDDEN, "Nothing to update") + raise web.HTTPError(constants.HTTP_FORBIDDEN, "Nothing to update") edit_request = data.format() edit_request.update(request) diff --git a/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py b/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py index e1bd9d359..65c27f60a 100644 --- a/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py @@ -6,17 +6,17 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +import handlers +from opnfv_testapi.common import constants from opnfv_testapi.tornado_swagger import swagger -from handlers import GenericApiHandler -from pod_models import Pod -from opnfv_testapi.common.constants import HTTP_FORBIDDEN +import pod_models -class GenericPodHandler(GenericApiHandler): +class GenericPodHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericPodHandler, self).__init__(application, request, **kwargs) self.table = 'pods' - self.table_cls = Pod + self.table_cls = pod_models.Pod class PodCLHandler(GenericPodHandler): @@ -46,7 +46,7 @@ class PodCLHandler(GenericPodHandler): def error(data): message = '{} already exists as a pod'.format(data.name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message miss_checks = ['name'] db_checks = [(self.table, False, query, error)] diff --git a/utils/test/testapi/opnfv_testapi/resources/project_handlers.py b/utils/test/testapi/opnfv_testapi/resources/project_handlers.py index 94c65b722..f3521961d 100644 --- a/utils/test/testapi/opnfv_testapi/resources/project_handlers.py +++ 
b/utils/test/testapi/opnfv_testapi/resources/project_handlers.py @@ -6,19 +6,19 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +import handlers +from opnfv_testapi.common import constants from opnfv_testapi.tornado_swagger import swagger -from handlers import GenericApiHandler -from opnfv_testapi.common.constants import HTTP_FORBIDDEN -from project_models import Project +import project_models -class GenericProjectHandler(GenericApiHandler): +class GenericProjectHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericProjectHandler, self).__init__(application, request, **kwargs) self.table = 'projects' - self.table_cls = Project + self.table_cls = project_models.Project class ProjectCLHandler(GenericProjectHandler): @@ -48,7 +48,7 @@ class ProjectCLHandler(GenericProjectHandler): def error(data): message = '{} already exists as a project'.format(data.name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message miss_checks = ['name'] db_checks = [(self.table, False, query, error)] diff --git a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py index 2a1ed56ee..d41ba4820 100644 --- a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py @@ -6,30 +6,32 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from datetime import datetime, timedelta +from datetime import datetime +from datetime import timedelta -from bson.objectid import ObjectId -from tornado.web import HTTPError +from bson import objectid +from tornado import web -from opnfv_testapi.common.constants import HTTP_BAD_REQUEST, HTTP_NOT_FOUND -from opnfv_testapi.resources.handlers import GenericApiHandler -from opnfv_testapi.resources.result_models import TestResult +from opnfv_testapi.common import constants +from opnfv_testapi.resources import handlers +from opnfv_testapi.resources import result_models from opnfv_testapi.tornado_swagger import swagger -class GenericResultHandler(GenericApiHandler): +class GenericResultHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericResultHandler, self).__init__(application, request, **kwargs) self.table = self.db_results - self.table_cls = TestResult + self.table_cls = result_models.TestResult def get_int(self, key, value): try: value = int(value) except: - raise HTTPError(HTTP_BAD_REQUEST, '{} must be int'.format(key)) + raise web.HTTPError(constants.HTTP_BAD_REQUEST, + '{} must be int'.format(key)) return value def set_query(self): @@ -144,14 +146,14 @@ class ResultsCLHandler(GenericResultHandler): def pod_error(data): message = 'Could not find pod [{}]'.format(data.pod_name) - return HTTP_NOT_FOUND, message + return constants.HTTP_NOT_FOUND, message def project_query(data): return {'name': data.project_name} def project_error(data): message = 'Could not find project [{}]'.format(data.project_name) - return HTTP_NOT_FOUND, message + return constants.HTTP_NOT_FOUND, message def testcase_query(data): return {'project_name': data.project_name, 'name': data.case_name} @@ -159,7 +161,7 @@ class ResultsCLHandler(GenericResultHandler): def testcase_error(data): message 
= 'Could not find testcase [{}] in project [{}]'\ .format(data.case_name, data.project_name) - return HTTP_NOT_FOUND, message + return constants.HTTP_NOT_FOUND, message miss_checks = ['pod_name', 'project_name', 'case_name'] db_checks = [('pods', True, pod_query, pod_error), @@ -178,7 +180,7 @@ class ResultsGURHandler(GenericResultHandler): @raise 404: test result not exist """ query = dict() - query["_id"] = ObjectId(result_id) + query["_id"] = objectid.ObjectId(result_id) self._get_one(query) @swagger.operation(nickname="updateTestResultById") @@ -193,6 +195,6 @@ class ResultsGURHandler(GenericResultHandler): @raise 404: result not exist @raise 403: nothing to update """ - query = {'_id': ObjectId(result_id)} + query = {'_id': objectid.ObjectId(result_id)} db_keys = [] self._update(query, db_keys) diff --git a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py index a8c1a94fe..083bf59fc 100644 --- a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py @@ -1,17 +1,16 @@ -from opnfv_testapi.common.constants import HTTP_FORBIDDEN -from opnfv_testapi.resources.handlers import GenericApiHandler -from opnfv_testapi.resources.scenario_models import Scenario +from opnfv_testapi.common import constants +from opnfv_testapi.resources import handlers import opnfv_testapi.resources.scenario_models as models from opnfv_testapi.tornado_swagger import swagger -class GenericScenarioHandler(GenericApiHandler): +class GenericScenarioHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericScenarioHandler, self).__init__(application, request, **kwargs) self.table = self.db_scenarios - self.table_cls = Scenario + self.table_cls = models.Scenario class ScenariosCLHandler(GenericScenarioHandler): @@ -81,7 +80,7 @@ class ScenariosCLHandler(GenericScenarioHandler): def error(data): message = '{} already exists as a scenario'.format(data.name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message miss_checks = ['name'] db_checks = [(self.table, False, query, error)] @@ -116,6 +115,17 @@ class ScenarioGURHandler(GenericScenarioHandler): db_keys = ['name'] self._update(query, db_keys) + @swagger.operation(nickname="deleteScenarioByName") + def delete(self, name): + """ + @description: delete a scenario by name + @return 200: delete success + @raise 404: scenario not exist: + """ + + query = {'name': name} + self._delete(query) + def _update_query(self, keys, data): query = dict() equal = True diff --git a/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py b/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py index 100a4fd91..3debd6918 100644 --- a/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py @@ -6,19 +6,19 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from opnfv_testapi.common.constants import HTTP_FORBIDDEN -from opnfv_testapi.resources.handlers import GenericApiHandler -from opnfv_testapi.resources.testcase_models import Testcase +from opnfv_testapi.common import constants +from opnfv_testapi.resources import handlers +from opnfv_testapi.resources import testcase_models from opnfv_testapi.tornado_swagger import swagger -class 
GenericTestcaseHandler(GenericApiHandler): +class GenericTestcaseHandler(handlers.GenericApiHandler): def __init__(self, application, request, **kwargs): super(GenericTestcaseHandler, self).__init__(application, request, **kwargs) self.table = self.db_testcases - self.table_cls = Testcase + self.table_cls = testcase_models.Testcase class TestcaseCLHandler(GenericTestcaseHandler): @@ -58,12 +58,12 @@ class TestcaseCLHandler(GenericTestcaseHandler): def p_error(data): message = 'Could not find project [{}]'.format(data.project_name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message def tc_error(data): message = '{} already exists as a testcase in project {}'\ .format(data.name, data.project_name) - return HTTP_FORBIDDEN, message + return constants.HTTP_FORBIDDEN, message miss_checks = ['name'] db_checks = [(self.db_projects, True, p_query, p_error), diff --git a/utils/test/testapi/opnfv_testapi/router/url_mappings.py b/utils/test/testapi/opnfv_testapi/router/url_mappings.py index 0ae3c31c3..39cf006af 100644 --- a/utils/test/testapi/opnfv_testapi/router/url_mappings.py +++ b/utils/test/testapi/opnfv_testapi/router/url_mappings.py @@ -6,37 +6,34 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from opnfv_testapi.resources.handlers import VersionHandler -from opnfv_testapi.resources.testcase_handlers import TestcaseCLHandler, \ - TestcaseGURHandler -from opnfv_testapi.resources.pod_handlers import PodCLHandler, PodGURHandler -from opnfv_testapi.resources.project_handlers import ProjectCLHandler, \ - ProjectGURHandler -from opnfv_testapi.resources.result_handlers import ResultsCLHandler, \ - ResultsGURHandler -from opnfv_testapi.resources.scenario_handlers import ScenariosCLHandler -from opnfv_testapi.resources.scenario_handlers import ScenarioGURHandler +from opnfv_testapi.resources import handlers +from opnfv_testapi.resources import pod_handlers +from opnfv_testapi.resources import project_handlers +from opnfv_testapi.resources import result_handlers +from opnfv_testapi.resources import scenario_handlers +from opnfv_testapi.resources import testcase_handlers mappings = [ # GET /versions => GET API version - (r"/versions", VersionHandler), + (r"/versions", handlers.VersionHandler), # few examples: # GET /api/v1/pods => Get all pods # GET /api/v1/pods/1 => Get details on POD 1 - (r"/api/v1/pods", PodCLHandler), - (r"/api/v1/pods/([^/]+)", PodGURHandler), + (r"/api/v1/pods", pod_handlers.PodCLHandler), + (r"/api/v1/pods/([^/]+)", pod_handlers.PodGURHandler), # few examples: # GET /projects # GET /projects/yardstick - (r"/api/v1/projects", ProjectCLHandler), - (r"/api/v1/projects/([^/]+)", ProjectGURHandler), + (r"/api/v1/projects", project_handlers.ProjectCLHandler), + (r"/api/v1/projects/([^/]+)", project_handlers.ProjectGURHandler), # few examples # GET /projects/qtip/cases => Get cases for qtip - (r"/api/v1/projects/([^/]+)/cases", TestcaseCLHandler), - (r"/api/v1/projects/([^/]+)/cases/([^/]+)", TestcaseGURHandler), + (r"/api/v1/projects/([^/]+)/cases", testcase_handlers.TestcaseCLHandler), + (r"/api/v1/projects/([^/]+)/cases/([^/]+)", + testcase_handlers.TestcaseGURHandler), # new path to avoid a long depth # GET /results?project=functest&case=keystone.catalog&pod=1 @@ -44,10 +41,10 @@ mappings = [ # POST /results => # Push results with mandatory request payload parameters # (project, case, and pod) - (r"/api/v1/results", 
ResultsCLHandler), - (r"/api/v1/results/([^/]+)", ResultsGURHandler), + (r"/api/v1/results", result_handlers.ResultsCLHandler), + (r"/api/v1/results/([^/]+)", result_handlers.ResultsGURHandler), # scenarios - (r"/api/v1/scenarios", ScenariosCLHandler), - (r"/api/v1/scenarios/([^/]+)", ScenarioGURHandler), + (r"/api/v1/scenarios", scenario_handlers.ScenariosCLHandler), + (r"/api/v1/scenarios/([^/]+)", scenario_handlers.ScenarioGURHandler), ] diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py b/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py index 3c4fd01a3..ef74a0857 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py @@ -242,3 +242,4 @@ projects = MemDb('projects') testcases = MemDb('testcases') results = MemDb('results') scenarios = MemDb('scenarios') +tokens = MemDb('tokens') diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py index fc780e44c..b2be8d593 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_base.py @@ -8,20 +8,20 @@ ############################################################################## import json -from tornado.web import Application -from tornado.testing import AsyncHTTPTestCase +from tornado import testing +from tornado import web -from opnfv_testapi.router import url_mappings -from opnfv_testapi.resources.models import CreateResponse import fake_pymongo +from opnfv_testapi.resources import models +from opnfv_testapi.router import url_mappings -class TestBase(AsyncHTTPTestCase): +class TestBase(testing.AsyncHTTPTestCase): headers = {'Content-Type': 'application/json; charset=UTF-8'} def setUp(self): self.basePath = '' - self.create_res = CreateResponse + self.create_res = models.CreateResponse self.get_res = None self.list_res = None self.update_res = None @@ -31,10 +31,11 @@ class TestBase(AsyncHTTPTestCase): super(TestBase, self).setUp() def get_app(self): - return Application( + return web.Application( url_mappings.mappings, db=fake_pymongo, debug=True, + auth=False ) def create_d(self, *args): diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py index 5f50ba867..7c43fca62 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py @@ -9,13 +9,13 @@ import unittest from tornado import gen -from tornado.testing import AsyncHTTPTestCase, gen_test -from tornado.web import Application +from tornado import testing +from tornado import web import fake_pymongo -class MyTest(AsyncHTTPTestCase): +class MyTest(testing.AsyncHTTPTestCase): def setUp(self): super(MyTest, self).setUp() self.db = fake_pymongo @@ -23,7 +23,7 @@ class MyTest(AsyncHTTPTestCase): self.io_loop.run_sync(self.fixture_setup) def get_app(self): - return Application() + return web.Application() @gen.coroutine def fixture_setup(self): @@ -32,13 +32,13 @@ class MyTest(AsyncHTTPTestCase): yield self.db.pods.insert({'_id': '1', 'name': 'test1'}) yield self.db.pods.insert({'name': 'test2'}) - @gen_test + @testing.gen_test def test_find_one(self): user = yield self.db.pods.find_one({'name': 'test1'}) self.assertEqual(user, self.test1) self.db.pods.remove() - @gen_test + @testing.gen_test def test_find(self): cursor = self.db.pods.find() names = [] @@ -47,7 +47,7 @@ 
class MyTest(AsyncHTTPTestCase): names.append(ob.get('name')) self.assertItemsEqual(names, ['test1', 'test2']) - @gen_test + @testing.gen_test def test_update(self): yield self.db.pods.update({'_id': '1'}, {'name': 'new_test1'}) user = yield self.db.pods.find_one({'_id': '1'}) @@ -71,7 +71,7 @@ class MyTest(AsyncHTTPTestCase): None, check_keys=False) - @gen_test + @testing.gen_test def test_remove(self): yield self.db.pods.remove({'_id': '1'}) user = yield self.db.pods.find_one({'_id': '1'}) @@ -104,7 +104,7 @@ class MyTest(AsyncHTTPTestCase): def _insert_assert(self, docs, error=None, **kwargs): self._db_assert('insert', error, docs, **kwargs) - @gen_test + @testing.gen_test def _db_assert(self, method, error, *args, **kwargs): name_error = None try: diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py index a1184d554..922bd46e2 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py @@ -8,20 +8,19 @@ ############################################################################## import unittest -from test_base import TestBase -from opnfv_testapi.resources.pod_models import PodCreateRequest, Pod, Pods -from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ - HTTP_FORBIDDEN, HTTP_NOT_FOUND +from opnfv_testapi.common import constants +from opnfv_testapi.resources import pod_models +import test_base as base -class TestPodBase(TestBase): +class TestPodBase(base.TestBase): def setUp(self): super(TestPodBase, self).setUp() - self.req_d = PodCreateRequest('zte-1', 'virtual', - 'zte pod 1', 'ci-pod') - self.req_e = PodCreateRequest('zte-2', 'metal', 'zte pod 2') - self.get_res = Pod - self.list_res = Pods + self.req_d = pod_models.PodCreateRequest('zte-1', 'virtual', + 'zte pod 1', 'ci-pod') + self.req_e = pod_models.PodCreateRequest('zte-2', 'metal', 'zte pod 2') + self.get_res = pod_models.Pod + self.list_res = pod_models.Pods self.basePath = '/api/v1/pods' def assert_get_body(self, pod, req=None): @@ -38,36 +37,36 @@ class TestPodBase(TestBase): class TestPodCreate(TestPodBase): def test_withoutBody(self): (code, body) = self.create() - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_emptyName(self): - req_empty = PodCreateRequest('') + req_empty = pod_models.PodCreateRequest('') (code, body) = self.create(req_empty) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_noneName(self): - req_none = PodCreateRequest(None) + req_none = pod_models.PodCreateRequest(None) (code, body) = self.create(req_none) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_success(self): code, body = self.create_d() - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_create_body(body) def test_alreadyExist(self): self.create_d() code, body = self.create_d() - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn('already exists', body) class TestPodGet(TestPodBase): def test_notExist(self): code, body = self.get('notExist') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_getOne(self): self.create_d() diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py 
b/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py index 327ddf7b2..afd4a6601 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_project.py @@ -8,21 +8,21 @@ ############################################################################## import unittest -from test_base import TestBase -from opnfv_testapi.resources.project_models import ProjectCreateRequest, \ - Project, Projects, ProjectUpdateRequest -from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ - HTTP_FORBIDDEN, HTTP_NOT_FOUND +from opnfv_testapi.common import constants +from opnfv_testapi.resources import project_models +import test_base as base -class TestProjectBase(TestBase): +class TestProjectBase(base.TestBase): def setUp(self): super(TestProjectBase, self).setUp() - self.req_d = ProjectCreateRequest('vping', 'vping-ssh test') - self.req_e = ProjectCreateRequest('doctor', 'doctor test') - self.get_res = Project - self.list_res = Projects - self.update_res = Project + self.req_d = project_models.ProjectCreateRequest('vping', + 'vping-ssh test') + self.req_e = project_models.ProjectCreateRequest('doctor', + 'doctor test') + self.get_res = project_models.Project + self.list_res = project_models.Projects + self.update_res = project_models.Project self.basePath = '/api/v1/projects' def assert_body(self, project, req=None): @@ -37,41 +37,41 @@ class TestProjectBase(TestBase): class TestProjectCreate(TestProjectBase): def test_withoutBody(self): (code, body) = self.create() - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_emptyName(self): - req_empty = ProjectCreateRequest('') + req_empty = project_models.ProjectCreateRequest('') (code, body) = self.create(req_empty) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_noneName(self): - req_none = ProjectCreateRequest(None) + req_none = project_models.ProjectCreateRequest(None) (code, body) = self.create(req_none) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_success(self): (code, body) = self.create_d() - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_create_body(body) def test_alreadyExist(self): self.create_d() (code, body) = self.create_d() - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn('already exists', body) class TestProjectGet(TestProjectBase): def test_notExist(self): code, body = self.get('notExist') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_getOne(self): self.create_d() code, body = self.get(self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_body(body) def test_list(self): @@ -88,23 +88,23 @@ class TestProjectGet(TestProjectBase): class TestProjectUpdate(TestProjectBase): def test_withoutBody(self): code, _ = self.update(None, 'noBody') - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_notFound(self): code, _ = self.update(self.req_e, 'notFound') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_newNameExist(self): self.create_d() self.create_e() code, body = self.update(self.req_e, self.req_d.name) - 
self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn("already exists", body) def test_noUpdate(self): self.create_d() code, body = self.update(self.req_d, self.req_d.name) - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn("Nothing to update", body) def test_success(self): @@ -112,9 +112,9 @@ class TestProjectUpdate(TestProjectBase): code, body = self.get(self.req_d.name) _id = body._id - req = ProjectUpdateRequest('newName', 'new description') + req = project_models.ProjectUpdateRequest('newName', 'new description') code, body = self.update(req, self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assertEqual(_id, body._id) self.assert_body(body, req) @@ -126,16 +126,16 @@ class TestProjectUpdate(TestProjectBase): class TestProjectDelete(TestProjectBase): def test_notFound(self): code, body = self.delete('notFound') - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) def test_success(self): self.create_d() code, body = self.delete(self.req_d.name) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assertEqual(body, '') code, body = self.get(self.req_d.name) - self.assertEqual(code, HTTP_NOT_FOUND) + self.assertEqual(code, constants.HTTP_NOT_FOUND) if __name__ == '__main__': unittest.main() diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py index 10575a9f5..2c7268eb6 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_result.py @@ -7,17 +7,15 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import copy -import unittest from datetime import datetime, timedelta +import unittest -from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ - HTTP_NOT_FOUND -from opnfv_testapi.resources.pod_models import PodCreateRequest -from opnfv_testapi.resources.project_models import ProjectCreateRequest -from opnfv_testapi.resources.result_models import ResultCreateRequest, \ - TestResult, TestResults, ResultUpdateRequest, TI, TIHistory -from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest -from test_base import TestBase +from opnfv_testapi.common import constants +from opnfv_testapi.resources import pod_models +from opnfv_testapi.resources import project_models +from opnfv_testapi.resources import result_models +from opnfv_testapi.resources import testcase_models +import test_base as base class Details(object): @@ -49,7 +47,7 @@ class Details(object): return t -class TestResultBase(TestBase): +class TestResultBase(base.TestBase): def setUp(self): self.pod = 'zte-pod1' self.project = 'functest' @@ -59,34 +57,41 @@ class TestResultBase(TestBase): self.build_tag = 'v3.0' self.scenario = 'odl-l2' self.criteria = 'passed' - self.trust_indicator = TI(0.7) + self.trust_indicator = result_models.TI(0.7) self.start_date = "2016-05-23 07:16:09.477097" self.stop_date = "2016-05-23 07:16:19.477097" self.update_date = "2016-05-24 07:16:19.477097" self.update_step = -0.05 super(TestResultBase, self).setUp() self.details = Details(timestart='0', duration='9s', status='OK') - self.req_d = ResultCreateRequest(pod_name=self.pod, - project_name=self.project, - case_name=self.case, - installer=self.installer, - version=self.version, - 
         self.create_help('/api/v1/pods', self.req_pod)
         self.create_help('/api/v1/projects', self.req_project)
         self.create_help('/api/v1/projects/%s/cases',
@@ -94,7 +99,7 @@ class TestResultBase(TestBase):
                          self.project)
 
     def assert_res(self, code, result, req=None):
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         if req is None:
             req = self.req_d
         self.assertEqual(result.pod_name, req.pod_name)
@@ -129,78 +134,78 @@ class TestResultBase(TestBase):
 class TestResultCreate(TestResultBase):
     def test_nobody(self):
         (code, body) = self.create(None)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('no body', body)
 
     def test_podNotProvided(self):
         req = self.req_d
         req.pod_name = None
         (code, body) = self.create(req)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('pod_name missing', body)
 
     def test_projectNotProvided(self):
         req = self.req_d
         req.project_name = None
         (code, body) = self.create(req)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('project_name missing', body)
 
     def test_testcaseNotProvided(self):
         req = self.req_d
         req.case_name = None
         (code, body) = self.create(req)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('case_name missing', body)
 
     def test_noPod(self):
         req = self.req_d
         req.pod_name = 'notExistPod'
         (code, body) = self.create(req)
-        self.assertEqual(code, HTTP_NOT_FOUND)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
         self.assertIn('Could not find pod', body)
 
     def test_noProject(self):
         req = self.req_d
         req.project_name = 'notExistProject'
         (code, body) = self.create(req)
-        self.assertEqual(code, HTTP_NOT_FOUND)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
         self.assertIn('Could not find project', body)
 
     def test_noTestcase(self):
         req = self.req_d
         req.case_name = 'notExistTestcase'
         (code, body) = self.create(req)
-        self.assertEqual(code, HTTP_NOT_FOUND)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
         self.assertIn('Could not find testcase', body)
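One detail worth noting in the TestResultCreate cases above: req = self.req_d binds a second name to the same fixture object, so assignments like req.pod_name = None mutate self.req_d itself. That is harmless here only because setUp rebuilds the fixture for every test, and test_key_with_doc below takes a deepcopy before mutating. A small runnable sketch of the distinction, with an illustrative field:

    import copy

    class Req(object):
        def __init__(self):
            self.pod_name = 'zte-pod1'  # illustrative default

    fixture = Req()
    alias = fixture                  # same object: mutations are shared
    alias.pod_name = None
    assert fixture.pod_name is None

    fixture = Req()
    clone = copy.deepcopy(fixture)   # independent copy: fixture survives
    clone.pod_name = None
    assert fixture.pod_name == 'zte-pod1'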
 
     def test_success(self):
         (code, body) = self.create_d()
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self.assert_href(body)
 
     def test_key_with_doc(self):
         req = copy.deepcopy(self.req_d)
         req.details = {'1.name': 'dot_name'}
         (code, body) = self.create(req)
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self.assert_href(body)
 
     def test_no_ti(self):
-        req = ResultCreateRequest(pod_name=self.pod,
-                                  project_name=self.project,
-                                  case_name=self.case,
-                                  installer=self.installer,
-                                  version=self.version,
-                                  start_date=self.start_date,
-                                  stop_date=self.stop_date,
-                                  details=self.details.format(),
-                                  build_tag=self.build_tag,
-                                  scenario=self.scenario,
-                                  criteria=self.criteria)
+        req = result_models.ResultCreateRequest(pod_name=self.pod,
+                                                project_name=self.project,
+                                                case_name=self.case,
+                                                installer=self.installer,
+                                                version=self.version,
+                                                start_date=self.start_date,
+                                                stop_date=self.stop_date,
+                                                details=self.details.format(),
+                                                build_tag=self.build_tag,
+                                                scenario=self.scenario,
+                                                criteria=self.criteria)
         (code, res) = self.create(req)
         _id = res.href.split('/')[-1]
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         code, body = self.get(_id)
         self.assert_res(code, body, req)
 
@@ -240,7 +245,7 @@ class TestResultGet(TestResultBase):
 
     def test_queryPeriodNotInt(self):
         code, body = self.query(self._set_query('period=a'))
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('period must be int', body)
 
     def test_queryPeriodFail(self):
@@ -253,7 +258,7 @@ class TestResultGet(TestResultBase):
 
     def test_queryLastNotInt(self):
         code, body = self.query(self._set_query('last=a'))
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('last must be int', body)
 
     def test_queryLast(self):
@@ -292,7 +297,7 @@ class TestResultGet(TestResultBase):
             req = self._create_changed_date(**kwargs)
         code, body = self.query(query)
         if not found:
-            self.assertEqual(code, HTTP_OK)
+            self.assertEqual(code, constants.HTTP_OK)
             self.assertEqual(0, len(body.results))
         else:
             self.assertEqual(1, len(body.results))
@@ -326,10 +331,11 @@ class TestResultUpdate(TestResultBase):
 
         new_ti = copy.deepcopy(self.trust_indicator)
         new_ti.current += self.update_step
-        new_ti.histories.append(TIHistory(self.update_date, self.update_step))
+        new_ti.histories.append(
+            result_models.TIHistory(self.update_date, self.update_step))
         new_data = copy.deepcopy(self.req_d)
         new_data.trust_indicator = new_ti
-        update = ResultUpdateRequest(trust_indicator=new_ti)
+        update = result_models.ResultUpdateRequest(trust_indicator=new_ti)
         code, body = self.update(update, _id)
         self.assertEqual(_id, body._id)
         self.assert_res(code, body, new_data)
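The TestResultUpdate hunk above preserves the trust-indicator bookkeeping: the running score moves by update_step (-0.05 against the initial TI(0.7) from setUp) and each step is recorded as a TIHistory entry. The real classes live in result_models, which this diff does not touch, so the shapes below are assumptions; a rough sketch of the bookkeeping the test exercises:

    class TIHistory(object):
        # assumed shape: when the adjustment happened and by how much
        def __init__(self, date, step):
            self.date = date
            self.step = step

    class TI(object):
        # assumed shape: running score plus an audit trail of steps
        def __init__(self, current):
            self.current = current
            self.histories = []

    ti = TI(0.7)
    step = -0.05
    ti.current += step
    ti.histories.append(TIHistory("2016-05-24 07:16:19.477097", step))
    assert abs(ti.current - 0.65) < 1e-9
    assert len(ti.histories) == 1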
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
index c15dc32ea..f604c5750 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
@@ -1,16 +1,14 @@
 from copy import deepcopy
+from datetime import datetime
 import json
 import os
-from datetime import datetime
 
-from opnfv_testapi.common.constants import HTTP_BAD_REQUEST
-from opnfv_testapi.common.constants import HTTP_FORBIDDEN
-from opnfv_testapi.common.constants import HTTP_OK
+from opnfv_testapi.common import constants
 import opnfv_testapi.resources.scenario_models as models
-from test_testcase import TestBase
+import test_base as base
 
 
-class TestScenarioBase(TestBase):
+class TestScenarioBase(base.TestBase):
     def setUp(self):
         super(TestScenarioBase, self).setUp()
         self.get_res = models.Scenario
@@ -38,7 +36,7 @@ class TestScenarioBase(TestBase):
         return res.href.split('/')[-1]
 
     def assert_res(self, code, scenario, req=None):
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         if req is None:
             req = self.req_d
         scenario_dict = scenario.format_http()
@@ -61,29 +59,29 @@ class TestScenarioBase(TestBase):
 class TestScenarioCreate(TestScenarioBase):
     def test_withoutBody(self):
         (code, body) = self.create()
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
 
     def test_emptyName(self):
         req_empty = models.ScenarioCreateRequest('')
         (code, body) = self.create(req_empty)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
 
     def test_noneName(self):
         req_none = models.ScenarioCreateRequest(None)
         (code, body) = self.create(req_none)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
 
     def test_success(self):
         (code, body) = self.create_d()
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self.assert_create_body(body)
 
     def test_alreadyExist(self):
         self.create_d()
         (code, body) = self.create_d()
-        self.assertEqual(code, HTTP_FORBIDDEN)
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
         self.assertIn('already exists', body)
 
 
@@ -126,7 +124,7 @@ class TestScenarioGet(TestScenarioBase):
     def _query_and_assert(self, query, found=True, reqs=None):
         code, body = self.query(query)
         if not found:
-            self.assertEqual(code, HTTP_OK)
+            self.assertEqual(code, constants.HTTP_OK)
             self.assertEqual(0, len(body.scenarios))
         else:
             self.assertEqual(len(reqs), len(body.scenarios))
@@ -296,10 +294,23 @@ class TestScenarioUpdate(TestScenarioBase):
 
     def _update_and_assert(self, update_req, new_scenario, name=None):
         code, _ = self.update(update_req, self.scenario)
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self._get_and_assert(self._none_default(name, self.scenario),
                              new_scenario)
 
     @staticmethod
     def _none_default(check, default):
         return check if check else default
+
+
+class TestScenarioDelete(TestScenarioBase):
+    def test_notFound(self):
+        code, body = self.delete('notFound')
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
+
+    def test_success(self):
+        scenario = self.create_return_name(self.req_d)
+        code, _ = self.delete(scenario)
+        self.assertEqual(code, constants.HTTP_OK)
+        code, _ = self.get(scenario)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
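The new TestScenarioDelete class brings scenarios in line with the delete coverage projects and test cases already have (TestProjectDelete above, TestCaseDelete below): delete a missing resource and expect 404, then create, delete, and confirm a follow-up GET also returns 404. Since the same two cases now appear three times, they could plausibly be factored into a shared mixin; a sketch under the assumption that the delete/get helpers come from TestBase and that each subclass supplies a hypothetical name_of_created() hook:

    from opnfv_testapi.common import constants

    class DeleteAssertsMixin(object):
        # name_of_created() is a hypothetical hook, not in the diff:
        # it creates the resource and returns the name to delete.

        def test_notFound(self):
            code, _ = self.delete('notFound')
            self.assertEqual(code, constants.HTTP_NOT_FOUND)

        def test_success(self):
            name = self.name_of_created()
            code, _ = self.delete(name)
            self.assertEqual(code, constants.HTTP_OK)
            code, _ = self.get(name)
            self.assertEqual(code, constants.HTTP_NOT_FOUND)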
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py
index cb767844a..c0494db5d 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py
@@ -6,35 +6,33 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import unittest
 import copy
+import unittest
 
-from test_base import TestBase
-from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest, \
-    Testcase, Testcases, TestcaseUpdateRequest
-from opnfv_testapi.resources.project_models import ProjectCreateRequest
-from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
-    HTTP_FORBIDDEN, HTTP_NOT_FOUND
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import project_models
+from opnfv_testapi.resources import testcase_models
+import test_base as base
 
 
-class TestCaseBase(TestBase):
+class TestCaseBase(base.TestBase):
     def setUp(self):
         super(TestCaseBase, self).setUp()
-        self.req_d = TestcaseCreateRequest('vping_1',
-                                           '/cases/vping_1',
-                                           'vping-ssh test')
-        self.req_e = TestcaseCreateRequest('doctor_1',
-                                           '/cases/doctor_1',
-                                           'create doctor')
-        self.update_d = TestcaseUpdateRequest('vping_1',
-                                              'vping-ssh test',
-                                              'functest')
-        self.update_e = TestcaseUpdateRequest('doctor_1',
-                                              'create doctor',
-                                              'functest')
-        self.get_res = Testcase
-        self.list_res = Testcases
-        self.update_res = Testcase
+        self.req_d = testcase_models.TestcaseCreateRequest('vping_1',
+                                                           '/cases/vping_1',
+                                                           'vping-ssh test')
+        self.req_e = testcase_models.TestcaseCreateRequest('doctor_1',
+                                                           '/cases/doctor_1',
+                                                           'create doctor')
+        self.update_d = testcase_models.TestcaseUpdateRequest('vping_1',
+                                                              'vping-ssh test',
+                                                              'functest')
+        self.update_e = testcase_models.TestcaseUpdateRequest('doctor_1',
+                                                              'create doctor',
+                                                              'functest')
+        self.get_res = testcase_models.Testcase
+        self.list_res = testcase_models.Testcases
+        self.update_res = testcase_models.Testcase
         self.basePath = '/api/v1/projects/%s/cases'
         self.create_project()
 
@@ -57,7 +55,8 @@ class TestCaseBase(TestBase):
         self.assertIsNotNone(new.creation_date)
 
     def create_project(self):
-        req_p = ProjectCreateRequest('functest', 'vping-ssh test')
+        req_p = project_models.ProjectCreateRequest('functest',
+                                                    'vping-ssh test')
         self.create_help('/api/v1/projects', req_p)
         self.project = req_p.name
 
@@ -80,46 +79,46 @@ class TestCaseBase(TestBase):
 class TestCaseCreate(TestCaseBase):
     def test_noBody(self):
         (code, body) = self.create(None, 'vping')
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
 
     def test_noProject(self):
         code, body = self.create(self.req_d, 'noProject')
-        self.assertEqual(code, HTTP_FORBIDDEN)
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
         self.assertIn('Could not find project', body)
 
     def test_emptyName(self):
-        req_empty = TestcaseCreateRequest('')
+        req_empty = testcase_models.TestcaseCreateRequest('')
         (code, body) = self.create(req_empty, self.project)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
 
     def test_noneName(self):
-        req_none = TestcaseCreateRequest(None)
+        req_none = testcase_models.TestcaseCreateRequest(None)
        (code, body) = self.create(req_none, self.project)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
 
     def test_success(self):
         code, body = self.create_d()
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self.assert_create_body(body, None, self.project)
 
     def test_alreadyExist(self):
         self.create_d()
         code, body = self.create_d()
-        self.assertEqual(code, HTTP_FORBIDDEN)
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
         self.assertIn('already exists', body)
 
 
 class TestCaseGet(TestCaseBase):
     def test_notExist(self):
         code, body = self.get('notExist')
-        self.assertEqual(code, HTTP_NOT_FOUND)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
 
     def test_getOne(self):
         self.create_d()
         code, body = self.get(self.req_d.name)
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self.assert_body(body)
 
     def test_list(self):
@@ -136,23 +135,23 @@ class TestCaseGet(TestCaseBase):
 class TestCaseUpdate(TestCaseBase):
     def test_noBody(self):
         code, _ = self.update(case='noBody')
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
 
     def test_notFound(self):
         code, _ = self.update(self.update_e, 'notFound')
-        self.assertEqual(code, HTTP_NOT_FOUND)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
 
     def test_newNameExist(self):
         self.create_d()
         self.create_e()
         code, body = self.update(self.update_e, self.req_d.name)
-        self.assertEqual(code, HTTP_FORBIDDEN)
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
         self.assertIn("already exists", body)
 
     def test_noUpdate(self):
         self.create_d()
         code, body = self.update(self.update_d, self.req_d.name)
-        self.assertEqual(code, HTTP_FORBIDDEN)
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
         self.assertIn("Nothing to update", body)
 
     def test_success(self):
@@ -161,7 +160,7 @@ class TestCaseUpdate(TestCaseBase):
         _id = body._id
 
         code, body = self.update(self.update_e, self.req_d.name)
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self.assertEqual(_id, body._id)
         self.assert_update_body(self.req_d, body, self.update_e)
 
@@ -174,22 +173,22 @@ class TestCaseUpdate(TestCaseBase):
         update = copy.deepcopy(self.update_d)
         update.description = {'2. change': 'dollar change'}
         code, body = self.update(update, self.req_d.name)
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
 
 
 class TestCaseDelete(TestCaseBase):
     def test_notFound(self):
         code, body = self.delete('notFound')
-        self.assertEqual(code, HTTP_NOT_FOUND)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
 
     def test_success(self):
         self.create_d()
         code, body = self.delete(self.req_d.name)
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self.assertEqual(body, '')
 
         code, body = self.get(self.req_d.name)
-        self.assertEqual(code, HTTP_NOT_FOUND)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
 
 
 if __name__ == '__main__':
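The new test_token.py below exercises the auth=True application path end to end: a valid token is seeded into the fake tokens collection, then each write verb (POST, DELETE, PUT) is tried with a wrong token, with no token, and with the seeded token. From the assertions, the contract appears to be: no X-Auth-Token header yields 401 with 'No Authentication Header.', a token absent from the collection yields 403 with 'Invalid Token.', and only a match lets the write through. A client-side sketch of that contract, using the requests library with an illustrative host and the token value from the tests:

    import requests  # assuming requests is available, for illustration only

    API = 'http://testresults.example.org/api/v1'   # hypothetical endpoint
    payload = {'name': 'vping', 'description': 'vping-ssh test'}

    # Expected per the tests: no header -> 401, unknown token -> 403,
    # seeded token ('12345' in the fixtures) -> 200.
    for headers, expected in [({}, 401),
                              ({'X-Auth-Token': '1234'}, 403),
                              ({'X-Auth-Token': '12345'}, 200)]:
        resp = requests.post(API + '/projects', json=payload, headers=headers)
        print(resp.status_code, expected)

Read-only GETs are exercised without a token in the earlier modules, which suggests authentication is enforced only on writes.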
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py
new file mode 100644
index 000000000..19b9e3e07
--- /dev/null
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_token.py
@@ -0,0 +1,118 @@
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import unittest
+
+from tornado import web
+
+import fake_pymongo
+from opnfv_testapi.common import constants
+from opnfv_testapi.resources import project_models
+from opnfv_testapi.router import url_mappings
+import test_base as base
+
+
+class TestToken(base.TestBase):
+    def get_app(self):
+        return web.Application(
+            url_mappings.mappings,
+            db=fake_pymongo,
+            debug=True,
+            auth=True
+        )
+
+
+class TestTokenCreateProject(TestToken):
+    def setUp(self):
+        super(TestTokenCreateProject, self).setUp()
+        self.req_d = project_models.ProjectCreateRequest('vping')
+        fake_pymongo.tokens.insert({"access_token": "12345"})
+        self.basePath = '/api/v1/projects'
+
+    def test_projectCreateTokenInvalid(self):
+        self.headers['X-Auth-Token'] = '1234'
+        code, body = self.create_d()
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
+        self.assertIn('Invalid Token.', body)
+
+    def test_projectCreateTokenUnauthorized(self):
+        self.headers.pop('X-Auth-Token')
+        code, body = self.create_d()
+        self.assertEqual(code, constants.HTTP_UNAUTHORIZED)
+        self.assertIn('No Authentication Header.', body)
+
+    def test_projectCreateTokenSuccess(self):
+        self.headers['X-Auth-Token'] = '12345'
+        code, body = self.create_d()
+        self.assertEqual(code, constants.HTTP_OK)
+
+
+class TestTokenDeleteProject(TestToken):
+    def setUp(self):
+        super(TestTokenDeleteProject, self).setUp()
+        self.req_d = project_models.ProjectCreateRequest('vping')
+        fake_pymongo.tokens.insert({"access_token": "12345"})
+        self.basePath = '/api/v1/projects'
+
+    def test_projectDeleteTokenInvalid(self):
+        self.headers['X-Auth-Token'] = '12345'
+        self.create_d()
+        self.headers['X-Auth-Token'] = '1234'
+        code, body = self.delete(self.req_d.name)
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
+        self.assertIn('Invalid Token.', body)
+
+    def test_projectDeleteTokenUnauthorized(self):
+        self.headers['X-Auth-Token'] = '12345'
+        self.create_d()
+        self.headers.pop('X-Auth-Token')
+        code, body = self.delete(self.req_d.name)
+        self.assertEqual(code, constants.HTTP_UNAUTHORIZED)
+        self.assertIn('No Authentication Header.', body)
+
+    def test_projectDeleteTokenSuccess(self):
+        self.headers['X-Auth-Token'] = '12345'
+        self.create_d()
+        code, body = self.delete(self.req_d.name)
+        self.assertEqual(code, constants.HTTP_OK)
+
+
+class TestTokenUpdateProject(TestToken):
+    def setUp(self):
+        super(TestTokenUpdateProject, self).setUp()
+        self.req_d = project_models.ProjectCreateRequest('vping')
+        fake_pymongo.tokens.insert({"access_token": "12345"})
+        self.basePath = '/api/v1/projects'
+
+    def test_projectUpdateTokenInvalid(self):
+        self.headers['X-Auth-Token'] = '12345'
+        self.create_d()
+        code, body = self.get(self.req_d.name)
+        self.headers['X-Auth-Token'] = '1234'
+        req = project_models.ProjectUpdateRequest('newName', 'new description')
+        code, body = self.update(req, self.req_d.name)
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
+        self.assertIn('Invalid Token.', body)
+
+    def test_projectUpdateTokenUnauthorized(self):
+        self.headers['X-Auth-Token'] = '12345'
+        self.create_d()
+        code, body = self.get(self.req_d.name)
+        self.headers.pop('X-Auth-Token')
+        req = project_models.ProjectUpdateRequest('newName', 'new description')
+        code, body = self.update(req, self.req_d.name)
+        self.assertEqual(code, constants.HTTP_UNAUTHORIZED)
+        self.assertIn('No Authentication Header.', body)
+
+    def test_projectUpdateTokenSuccess(self):
+        self.headers['X-Auth-Token'] = '12345'
+        self.create_d()
+        code, body = self.get(self.req_d.name)
+        req = project_models.ProjectUpdateRequest('newName', 'new description')
+        code, body = self.update(req, self.req_d.name)
+        self.assertEqual(code, constants.HTTP_OK)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py
index b6fbf45dc..c8f3f5062 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_version.py
@@ -8,14 +8,14 @@
 ##############################################################################
 import unittest
 
-from test_base import TestBase
-from opnfv_testapi.resources.models import Versions
+from opnfv_testapi.resources import models
+import test_base as base
 
 
-class TestVersionBase(TestBase):
+class TestVersionBase(base.TestBase):
     def setUp(self):
         super(TestVersionBase, self).setUp()
-        self.list_res = Versions
+        self.list_res = models.Versions
         self.basePath = '/versions'