-rw-r--r--  .gitmodules                                            |  3
-rwxr-xr-x  jjb/apex/apex-deploy.sh                                |  9
-rwxr-xr-x  jjb/apex/apex-download-artifact.sh                     |  5
-rw-r--r--  jjb/apex/apex.yml                                      | 19
-rw-r--r--  jjb/apex/apex.yml.j2                                   | 19
-rwxr-xr-x  jjb/dovetail/dovetail-run.sh                           | 17
m---------  jjb/global-jjb                                         |  0
-rwxr-xr-x  jjb/xci/xci-run-functest.sh                            |  2
-rw-r--r--  jjb/xci/xci-verify-jobs.yml                            |  6
-rw-r--r--  modules/opnfv/deployment/compass/adapter_container.py | 83
-rw-r--r--  modules/opnfv/deployment/factory.py                    |  6
11 files changed, 151 insertions(+), 18 deletions(-)
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..07b28be4b
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "jjb/global-jjb"]
+  path = jjb/global-jjb
+  url = https://github.com/lfit/releng-global-jjb
diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh
index b8ae75a1f..123db3e85 100755
--- a/jjb/apex/apex-deploy.sh
+++ b/jjb/apex/apex-deploy.sh
@@ -31,7 +31,9 @@ elif [[ "$DEPLOY_SCENARIO" == *gate* ]]; then
 fi
 
 # Dev or RPM/ISO build
-if [[ "$ARTIFACT_VERSION" =~ dev ]]; then
+# For upstream deployments we currently only use git repo and not RPM
+# Need to decide after Fraser if we want to use RPM or not for upstream
+if [[ "$ARTIFACT_VERSION" =~ dev || "$DEPLOY_SCENARIO" =~ "upstream" ]]; then
   # Settings for deploying from git workspace
   DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
   NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
@@ -134,6 +136,11 @@ else
   DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}"
 fi
 
+if [[ "$DEPLOY_SCENARIO" =~ "upstream" ]]; then
+  echo "Upstream deployment detected"
+  DEPLOY_CMD="${DEPLOY_CMD} --upstream"
+fi
+
 if [ "$IPV6_FLAG" == "True" ]; then
   NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
 elif echo ${DEPLOY_SCENARIO} | grep fdio; then
diff --git a/jjb/apex/apex-download-artifact.sh b/jjb/apex/apex-download-artifact.sh
index 68baf59da..c12406cdb 100755
--- a/jjb/apex/apex-download-artifact.sh
+++ b/jjb/apex/apex-download-artifact.sh
@@ -10,7 +10,10 @@ echo
 
 [[ -d $BUILD_DIRECTORY ]] || mkdir -p $BUILD_DIRECTORY
 
-if [[ "$ARTIFACT_VERSION" =~ dev ]]; then
+# if upstream we do not need to download anything
+if [[ "$DEPLOY_SCENARIO" =~ upstream ]]; then
+  echo "Upstream deployment detected, skipping download artifact"
+elif [[ "$ARTIFACT_VERSION" =~ dev ]]; then
   # dev build
   GERRIT_PATCHSET_NUMBER=$(echo $GERRIT_REFSPEC | grep -Eo '[0-9]+$')
   export OPNFV_ARTIFACT_VERSION="dev${GERRIT_CHANGE_NUMBER}_${GERRIT_PATCHSET_NUMBER}"
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index cf29b928c..e19a90e94 100644
--- a/jjb/apex/apex.yml
+++ b/jjb/apex/apex.yml
@@ -530,7 +530,7 @@
               abort-all-job: false
               git-revision: false
       - multijob:
-          name: Dovetail
+          name: Dovetail-proposed_tests
           condition: ALWAYS
           projects:
             - name: 'dovetail-apex-baremetal-proposed_tests-{scenario_stream}'
@@ -539,7 +539,22 @@
               predefined-parameters:
                 DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               kill-phase-on: NEVER
-              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/"
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/
+                                 && $BUILD_NUMBER % 2 == 1"
+              abort-all-job: false
+              git-revision: false
+      - multijob:
+          name: Dovetail-default
+          condition: ALWAYS
+          projects:
+            - name: 'dovetail-apex-baremetal-default-{scenario_stream}'
+              node-parameters: true
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+              kill-phase-on: NEVER
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/
+                                 && $BUILD_NUMBER % 2 == 0"
               abort-all-job: false
               git-revision: false
       - multijob:
diff --git a/jjb/apex/apex.yml.j2 b/jjb/apex/apex.yml.j2
index ab65c4e2b..ecc6f270c 100644
--- a/jjb/apex/apex.yml.j2
+++ b/jjb/apex/apex.yml.j2
@@ -408,7 +408,7 @@
               abort-all-job: false
               git-revision: false
       - multijob:
-          name: Dovetail
+          name: Dovetail-proposed_tests
           condition: ALWAYS
           projects:
             - name: 'dovetail-apex-baremetal-proposed_tests-{scenario_stream}'
@@ -417,7 +417,22 @@
               predefined-parameters:
                 DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               kill-phase-on: NEVER
-              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/"
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/
+                                 && $BUILD_NUMBER % 2 == 1"
+              abort-all-job: false
+              git-revision: false
+      - multijob:
+          name: Dovetail-default
+          condition: ALWAYS
+          projects:
+            - name: 'dovetail-apex-baremetal-default-{scenario_stream}'
+              node-parameters: true
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+              kill-phase-on: NEVER
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/
+                                 && $BUILD_NUMBER % 2 == 0"
               abort-all-job: false
               git-revision: false
       - multijob:
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index ec879e325..a5a95f402 100755
--- a/jjb/dovetail/dovetail-run.sh
+++ b/jjb/dovetail/dovetail-run.sh
@@ -24,6 +24,9 @@ mkdir -p ${DOVETAIL_HOME}
 DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
 mkdir -p ${DOVETAIL_CONFIG}
 
+DOVETAIL_IMAGES=${DOVETAIL_HOME}/images
+mkdir -p ${DOVETAIL_IMAGES}
+
 ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
 
 sshkey=""
@@ -189,7 +192,7 @@ if [[ ! -f ${ubuntu_image} ]]; then
     echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
     wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${image_path}
 fi
-sudo cp ${ubuntu_image} ${DOVETAIL_CONFIG}
+sudo cp ${ubuntu_image} ${DOVETAIL_IMAGES}
 
 # functest needs to download this image first before running
 cirros_image=${image_path}/cirros-0.3.5-x86_64-disk.img
@@ -197,7 +200,7 @@ if [[ ! -f ${cirros_image} ]]; then
     echo "Download image cirros-0.3.5-x86_64-disk.img ..."
     wget -q -nc http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img -P ${image_path}
 fi
-sudo cp ${cirros_image} ${DOVETAIL_CONFIG}
+sudo cp ${cirros_image} ${DOVETAIL_IMAGES}
 
 # snaps_smoke test case needs to download this image first before running
 ubuntu14_image=${image_path}/ubuntu-14.04-server-cloudimg-amd64-disk1.img
@@ -205,7 +208,7 @@ if [[ ! -f ${ubuntu14_image} ]]; then
     echo "Download image ubuntu-14.04-server-cloudimg-amd64-disk1.img ..."
     wget -q -nc https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P ${image_path}
 fi
-sudo cp ${ubuntu14_image} ${DOVETAIL_CONFIG}
+sudo cp ${ubuntu14_image} ${DOVETAIL_IMAGES}
 
 # cloudify_ims test case needs to download these 2 images first before running
 cloudify_image=${image_path}/cloudify-manager-premium-4.0.1.qcow2
@@ -213,13 +216,13 @@ if [[ ! -f ${cloudify_image} ]]; then
     echo "Download image cloudify-manager-premium-4.0.1.qcow2 ..."
     wget -q -nc http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2 -P ${image_path}
 fi
-sudo cp ${cloudify_image} ${DOVETAIL_CONFIG}
+sudo cp ${cloudify_image} ${DOVETAIL_IMAGES}
 
 trusty_image=${image_path}/trusty-server-cloudimg-amd64-disk1.img
 if [[ ! -f ${trusty_image} ]]; then
     echo "Download image trusty-server-cloudimg-amd64-disk1.img ..."
     wget -q -nc http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img -P ${image_path}
 fi
-sudo cp ${trusty_image} ${DOVETAIL_CONFIG}
+sudo cp ${trusty_image} ${DOVETAIL_IMAGES}
 
 opts="--privileged=true -id"
 
@@ -236,10 +239,8 @@ fi
 echo "Dovetail: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
 docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect
 
-env4bgpvpn="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP}"
-
 cmd="docker run ${opts} -e DOVETAIL_HOME=${DOVETAIL_HOME} ${docker_volume} ${dovetail_home_volume} \
-     ${sshkey} ${env4bgpvpn} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
+     ${sshkey} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
 echo "Dovetail: running docker run command: ${cmd}"
 ${cmd} >${redirect}
 sleep 5
diff --git a/jjb/global-jjb b/jjb/global-jjb
new file mode 160000
+Subproject 779110b5cd63f3eabb63598a1be79d9b9ba8546
diff --git a/jjb/xci/xci-run-functest.sh b/jjb/xci/xci-run-functest.sh
index 60b48cf7f..1f616de1f 100755
--- a/jjb/xci/xci-run-functest.sh
+++ b/jjb/xci/xci-run-functest.sh
@@ -51,7 +51,7 @@ if ! sed -n "/^- scenario: $DEPLOY_SCENARIO$/,/^$/p" $OPNFV_SCENARIO_REQUIREMENT
     exit 0
 fi
 
-ssh -F $HOME/.ssh/${DISTRO}-xci-vm-config ${DISTRO}_xci_vm "cd releng-xci/xci && PATH=/home/devuser/.local/bin:$PATH ansible-playbook -i installer/osa/files/$XCI_FLAVOR/inventory playbooks/prepare-functest.yml"
+ssh -F $HOME/.ssh/${DISTRO}-xci-vm-config ${DISTRO}_xci_vm_opnfv "cd /root/releng-xci/xci/playbooks && ansible-playbook -i inventory prepare-functest.yml"
 echo "Running functest"
 ssh -F $HOME/.ssh/${DISTRO}-xci-vm-config ${DISTRO}_xci_vm_opnfv "/root/run-functest.sh"
 echo "Functest log"
diff --git a/jjb/xci/xci-verify-jobs.yml b/jjb/xci/xci-verify-jobs.yml
index 383af2f71..d78dc82d9 100644
--- a/jjb/xci/xci-verify-jobs.yml
+++ b/jjb/xci/xci-verify-jobs.yml
@@ -110,6 +110,12 @@
           forbidden-file-paths:
             - compare-type: ANT
               pattern: 'xci/scripts/vm/**'
+            - compare-type: ANT
+              pattern: 'docs/**'
+            - compare-type: ANT
+              pattern: 'prototypes/**'
+            - compare-type: ANT
+              pattern: 'upstream/**'
         - project-compare-type: 'REG_EXP'
           project-pattern: 'sfc|sdnvpn'
           branches:
diff --git a/modules/opnfv/deployment/compass/adapter_container.py b/modules/opnfv/deployment/compass/adapter_container.py
new file mode 100644
index 000000000..1713fe274
--- /dev/null
+++ b/modules/opnfv/deployment/compass/adapter_container.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+from opnfv.deployment import manager
+from opnfv.utils import opnfv_logger as logger
+from opnfv.utils import ssh_utils
+
+import yaml
+import os
+
+logger = logger.Logger(__name__).getLogger()
+
+
+class ContainerizedCompassAdapter():
+
+    def __init__(self, installer_ip, installer_user, pkey_file):
+
+        self.installer = 'compass'
+        self.installer_ip = installer_ip
+        self.installer_user = installer_user
+        self.pkey_file = pkey_file
+        self.DST_PATH_UC = "/tmp/openstack_user_config.yml"
+        self.nodes = []
+        self.ROLES = {}
+
+        if pkey_file is not None and not os.path.isfile(pkey_file):
+            raise Exception(
+                'The private key file %s does not exist!' % pkey_file)
+
+    def _find_nodes(self, file):
+        nodes = file['compute_hosts']
+        for compute in nodes:
+            self.ROLES[compute] = 'compute'
+        controllers = file['haproxy_hosts']
+        for controller in controllers:
+            nodes[controller] = controllers[controller]
+            self.ROLES[controller] = 'controller'
+        return nodes
+
+    def _process_nodes(self, raw_nodes):
+        nodes = []
+
+        for node in raw_nodes:
+            name = node
+            ip = raw_nodes[node]['ip']
+            status = 'active'
+            id = None
+            if self.ROLES[node] == 'controller':
+                roles = 'controller'
+            elif self.ROLES[node] == 'compute':
+                roles = 'compute'
+            ssh_client = ssh_utils.get_ssh_client(hostname=ip,
+                                                  username=self.installer_user,
+                                                  pkey_file=self.pkey_file)
+            node = manager.Node(id, ip, name, status, roles, ssh_client)
+            nodes.append(node)
+
+        return nodes
+
+    def get_nodes(self, options=None):
+        try:
+            # if we have retrieved previously all the nodes, don't do it again
+            # This fails the first time when the constructor calls this method
+            # therefore the try/except
+            if len(self.nodes) > 0:
+                return self.nodes
+        except:
+            pass
+
+        with open(self.DST_PATH_UC, 'r') as stream:
+            try:
+                file = yaml.load(stream)
+                raw_nodes = self._find_nodes(file)
+            except yaml.YAMLError as exc:
+                logger.error(exc)
+        self.nodes = self._process_nodes(raw_nodes)
+        return self.nodes
diff --git a/modules/opnfv/deployment/factory.py b/modules/opnfv/deployment/factory.py
index 2788e5eaa..1fd8d447b 100644
--- a/modules/opnfv/deployment/factory.py
+++ b/modules/opnfv/deployment/factory.py
@@ -9,7 +9,7 @@
 
 from opnfv.deployment.apex import adapter as apex_adapter
-from opnfv.deployment.compass import adapter as compass_adapter
+from opnfv.deployment.compass import adapter_container as compass_adapter
 from opnfv.deployment.fuel import adapter as fuel_adapter
 from opnfv.deployment.osa import adapter as osa_adapter
 from opnfv.deployment.daisy import adapter as daisy_adapter
@@ -44,10 +44,10 @@ class Factory(object):
                                              installer_user=installer_user,
                                              installer_pwd=installer_pwd)
         elif installer.lower() == "compass":
-            return compass_adapter.CompassAdapter(
+            return compass_adapter.ContainerizedCompassAdapter(
                 installer_ip=installer_ip,
                 installer_user=installer_user,
-                installer_pwd=installer_pwd)
+                pkey_file=pkey_file)
         elif installer.lower() == "osa":
             return osa_adapter.OSAAdapter(installer_ip=installer_ip,
                                           installer_user=installer_user,
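
Note on the apex.yml / apex.yml.j2 hunks: the two new Groovy enable-conditions split Dovetail coverage by build parity, so a qualifying HA scenario runs the proposed_tests suite on odd-numbered builds and the default suite on even-numbered builds instead of running both every time. A toy Python mirror of that selection logic is sketched below purely for illustration; the real evaluation happens in the Jenkins enable-condition strings shown in the diff.

import re

def dovetail_suite(build_number, deploy_scenario):
    """Toy mirror of the two enable-conditions above: only the HA
    nosdn-nofeature / odl-bgpvpn scenarios trigger Dovetail, and the
    suite alternates with the Jenkins build number."""
    if not re.match(r'os-(nosdn-nofeature|odl-bgpvpn)-ha$', deploy_scenario):
        return None  # enable-condition false, the Dovetail phase is skipped
    return 'proposed_tests' if build_number % 2 == 1 else 'default'

# e.g. dovetail_suite(41, 'os-nosdn-nofeature-ha') -> 'proposed_tests'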
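
The new ContainerizedCompassAdapter reads /tmp/openstack_user_config.yml and only consults its compute_hosts and haproxy_hosts sections, taking each host's ip field and tagging the role from the section it came from. A minimal sketch of the layout _find_nodes() expects follows; the host names and addresses are invented, and the sketch uses yaml.safe_load even though the adapter itself calls yaml.load.

import yaml

# Invented example of the openstack_user_config.yml shape the adapter parses;
# compute_hosts entries become 'compute' nodes, haproxy_hosts 'controller' nodes.
sample = yaml.safe_load("""
compute_hosts:
  compute00:
    ip: 172.29.236.11
haproxy_hosts:
  controller00:
    ip: 172.29.236.21
""")
assert 'compute_hosts' in sample and 'haproxy_hosts' in sample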
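
With the factory.py change, requesting a "compass" handler returns the containerized adapter and passes pkey_file instead of installer_pwd. A hypothetical call site is sketched below; the get_handler name and its keyword arguments are assumptions (the enclosing method signature is not part of this hunk), the IP, user, and key path are placeholders, and the Node attribute names assume manager.Node exposes its constructor arguments.

from opnfv.deployment.factory import Factory

# Hypothetical consumer of the new compass adapter (not part of this change).
handler = Factory.get_handler(installer='compass',
                              installer_ip='10.1.0.50',
                              installer_user='root',
                              pkey_file='/root/.ssh/id_rsa')

# get_nodes() builds manager.Node objects from /tmp/openstack_user_config.yml.
for node in handler.get_nodes():
    print(node.name, node.ip, node.roles, node.status)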