author     Feng Pan <fpan@redhat.com>              2017-08-23 17:26:53 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>   2017-08-23 17:26:53 +0000
commit     c6f04a5dee991a131a49c2fde9c5990fe2edac4e (patch)
tree       6385096492e6526bf091bae4f3b956a1e865fbba /ci
parent     52b4c2556b909a1e61b50f0ff75778bed962ba85 (diff)
parent     f4d388ea508ba00771e43a219ac64e0d430b73bd (diff)
Merge "Migrates Apex to Python"
Diffstat (limited to 'ci')
-rw-r--r--   ci/build.py             234
-rwxr-xr-x   ci/build.sh               2
-rwxr-xr-x   ci/clean.sh             118
-rwxr-xr-x   ci/deploy.sh            248
-rwxr-xr-x   ci/run_smoke_tests.sh     2
-rwxr-xr-x   ci/util.sh               84
6 files changed, 192 insertions, 496 deletions
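
Net effect on the CI entry points, per the hunks below: ci/build.sh now delegates to the build module relocated into the apex package, and ci/clean.sh invokes apex.clean as a Python module (installing the package first if the opnfv-apex-common RPM is absent). A minimal sketch of the new invocations, run from the ci/ directory; the flag values and the inventory path are illustrative only:

    # build images and rpm checks via the relocated module (as wired in ci/build.sh)
    python3 ../apex/build.py --debug

    # tear down a deployment from an inventory file (as wired in ci/clean.sh)
    python3 -m apex.clean -f ~/inventory.yaml
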
diff --git a/ci/build.py b/ci/build.py
deleted file mode 100644
index a17b21bd..00000000
--- a/ci/build.py
+++ /dev/null
@@ -1,234 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import argparse
-import logging
-import os
-import subprocess
-import sys
-import uuid
-import yaml
-
-CACHE_JOURNAL = 'cache_journal.yaml'
-TMP_CACHE = '.cache'
-BUILD_ROOT = 'build'
-BUILD_LOG_FILE = './apex_build.log'
-
-class ApexBuildException(Exception):
- pass
-
-
-def create_build_parser():
- build_parser = argparse.ArgumentParser()
- build_parser.add_argument('--debug', action='store_true', default=False,
- help="Turn on debug messages")
- build_parser.add_argument('-l', '--log-file',
- default=BUILD_LOG_FILE,
- dest='log_file', help="Log file to log to")
- build_parser.add_argument('-c', '--cache-dir',
- dest='cache_dir',
- default=None,
- help='Directory to store cache')
- build_parser.add_argument('--iso', action='store_true',
- default=False,
- help='Build ISO image')
- build_parser.add_argument('--rpms', action='store_true',
- default=False,
- help='Build RPMs')
- build_parser.add_argument('-r', '--release',
- dest='build_version',
- help='Version to apply to build '
- 'artifact label')
-
- return build_parser
-
-
-def get_journal(cache_dir):
- """
- Search for the journal file and returns its contents
- :param cache_dir: cache storage directory where journal file is
- :return: content of journal file
- """
- journal_file = "{}/{}".format(cache_dir, CACHE_JOURNAL)
- if os.path.isfile(journal_file) is False:
- logging.info("Journal file not found {}, skipping cache search".format(
- journal_file))
- else:
- with open(journal_file, 'r') as fh:
- cache_journal = yaml.safe_load(fh)
- assert isinstance(cache_journal, list)
- return cache_journal
-
-
-def get_cache_file(cache_dir):
- """
- Searches for a valid cache entry in the cache journal
- :param cache_dir: directory where cache and journal are located
- :return: name of valid cache file
- """
- cache_journal = get_journal(cache_dir)
- if cache_journal is not None:
- valid_cache = cache_journal[-1]
- if os.path.isfile(valid_cache):
- return valid_cache
-
-
-def unpack_cache(cache_dest, cache_dir=None):
- if cache_dir is None:
- logging.info("Cache directory not provided, skipping cache unpack")
- return
- elif os.path.isdir(cache_dir) is False:
- logging.info("Cache Directory does not exist, skipping cache unpack")
- return
- else:
- logging.info("Cache Directory Found: {}".format(cache_dir))
- cache_file = get_cache_file(cache_dir)
- if cache_file is None:
- logging.info("No cache file detected, skipping cache unpack")
- return
- logging.info("Unpacking Cache {}".format(cache_file))
- if not os.path.exists(cache_dest):
- os.makedirs(cache_dest)
- try:
- subprocess.check_call(["tar", "xvf", cache_file, "-C", cache_dest])
- except subprocess.CalledProcessError:
- logging.warning("Cache unpack failed")
- return
- logging.info("Cache unpacked, contents are: {}",
- os.listdir(cache_dest))
-
-
-def build(build_root, version, iso=False, rpms=False):
- if iso:
- make_targets = ['iso']
- elif rpms:
- make_targets = ['rpms']
- else:
- make_targets = ['images', 'rpms-check']
- if version is not None:
- make_args = ['RELEASE={}'.format(version)]
- else:
- make_args = []
- logging.info('Building targets: {}'.format(make_targets))
- try:
- output = subprocess.check_output(["make"] + make_args + ["-C",
- build_root] + make_targets)
- logging.info(output)
- except subprocess.CalledProcessError as e:
- logging.error("Failed to build Apex artifacts")
- logging.error(e.output)
- raise e
-
-
-def build_cache(cache_source, cache_dir):
- """
- Tar up new cache with unique name and store it in cache storage
- directory. Also update journal file with new cache entry.
- :param cache_source: source files to tar up when building cache file
- :param cache_dir: cache storage location
- :return: None
- """
- if cache_dir is None:
- logging.info("No cache dir specified, will not build cache")
- return
- cache_name = 'apex-cache-{}.tgz'.format(str(uuid.uuid4()))
- cache_full_path = os.path.join(cache_dir, cache_name)
- os.makedirs(cache_dir, exist_ok=True)
- try:
- subprocess.check_call(['tar', '--atime-preserve', '--dereference',
- '-caf', cache_full_path, '-C', cache_source,
- '.'])
- except BaseException as e:
- logging.error("Unable to build new cache tarball")
- if os.path.isfile(cache_full_path):
- os.remove(cache_full_path)
- raise e
- if os.path.isfile(cache_full_path):
- logging.info("Cache Build Complete")
- # update journal
- cache_entries = get_journal(cache_dir)
- if cache_entries is None:
- cache_entries = [cache_name]
- else:
- cache_entries.append(cache_name)
- journal_file = os.path.join(cache_dir, CACHE_JOURNAL)
- with open(journal_file, 'w') as fh:
- yaml.safe_dump(cache_entries, fh, default_flow_style=False)
- logging.info("Journal updated with new entry: {}".format(cache_name))
- else:
- logging.warning("Cache file did not build correctly")
-
-
-def prune_cache(cache_dir):
- """
- Remove older cache entries if there are more than 2
- :param cache_dir: Cache storage directory
- :return: None
- """
- if cache_dir is None:
- return
- cache_modified_flag = False
- cache_entries = get_journal(cache_dir)
- while len(cache_entries) > 2:
- logging.debug("Will remove older cache entries")
- cache_to_rm = cache_entries[0]
- cache_full_path = os.path.join(cache_dir, cache_to_rm)
- if os.path.isfile(cache_full_path):
- try:
- os.remove(cache_full_path)
- cache_entries.pop(0)
- cache_modified_flag = True
- except os.EX_OSERR:
- logging.warning("Failed to remove cache file: {}".format(
- cache_full_path))
- break
-
- else:
- logging.debug("No more cache cleanup necessary")
-
- if cache_modified_flag:
- logging.debug("Updating cache journal")
- journal_file = os.path.join(cache_dir, CACHE_JOURNAL)
- with open(journal_file, 'w') as fh:
- yaml.safe_dump(cache_entries, fh, default_flow_style=False)
-
-if __name__ == '__main__':
- parser = create_build_parser()
- args = parser.parse_args(sys.argv[1:])
- if args.debug:
- log_level = logging.DEBUG
- else:
- log_level = logging.INFO
- os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
- formatter = '%(asctime)s %(levelname)s: %(message)s'
- logging.basicConfig(filename=args.log_file,
- format=formatter,
- datefmt='%m/%d/%Y %I:%M:%S %p',
- level=log_level)
- console = logging.StreamHandler()
- console.setLevel(log_level)
- console.setFormatter(logging.Formatter(formatter))
- logging.getLogger('').addHandler(console)
- apex_root = os.path.split(os.getcwd())[0]
- for root, dirs, files in os.walk(apex_root):
- if BUILD_ROOT in dirs:
- apex_root = root
- apex_build_root = os.path.join(apex_root, BUILD_ROOT)
- if os.path.isdir(apex_build_root):
- cache_tmp_dir = os.path.join(apex_root, TMP_CACHE)
- else:
- logging.error("You must execute this script inside of the Apex "
- "local code repository")
- raise ApexBuildException("Invalid path for apex root: {}. Must be "
- "invoked from within Apex code directory.".
- format(apex_root))
- unpack_cache(cache_tmp_dir, args.cache_dir)
- build(apex_build_root, args.build_version, args.iso, args.rpms)
- build_cache(cache_tmp_dir, args.cache_dir)
- prune_cache(args.cache_dir)
diff --git a/ci/build.sh b/ci/build.sh
index 5cd2c28d..113f35d6 100755
--- a/ci/build.sh
+++ b/ci/build.sh
@@ -13,4 +13,4 @@ set -e
rpm -q ansible || sudo yum -y install ansible
ansible-playbook --become -i "localhost," -c local $DIR/../lib/ansible/playbooks/build_dependencies.yml -vvv
make -C $DIR/../build clean
-python3 $DIR/build.py $@
+python3 $DIR/../apex/build.py $@
diff --git a/ci/clean.sh b/ci/clean.sh
index fba1f126..e35b95b1 100755
--- a/ci/clean.sh
+++ b/ci/clean.sh
@@ -12,23 +12,11 @@
#author: Dan Radez (dradez@redhat.com)
#author: Tim Rozet (trozet@redhat.com)
-# Use default if no param passed
-BASE=${BASE:-'/var/opt/opnfv'}
-IMAGES=${IMAGES:-"$BASE/images"}
-LIB=${LIB:-"$BASE/lib"}
reset=$(tput sgr0 || echo "")
blue=$(tput setaf 4 || echo "")
red=$(tput setaf 1 || echo "")
green=$(tput setaf 2 || echo "")
-##LIBRARIES
-for lib in common-functions parse-functions; do
- if ! source $LIB/${lib}.sh; then
- echo "Failed to source $LIB/${lib}.sh"
- exit 1
- fi
-done
-
vm_index=4
ovs_bridges="br-admin br-tenant br-external br-storage"
ovs_bridges+=" br-private br-public" # Legacy names, remove in E river
@@ -37,6 +25,102 @@ ovs_bridges+=" br-private br-public" # Legacy names, remove in E river
OPNFV_NETWORK_TYPES+=" admin tenant external storage api"
OPNFV_NETWORK_TYPES+=" admin_network private_network public_network storage_network api_network" # Legecy names, remove in E river
+##detach interface from OVS and set the network config correctly
+##params: bridge to detach from
+##assumes only 1 real interface attached to OVS
+function detach_interface_from_ovs {
+ local bridge
+ local port_output ports_no_orig
+ local net_path
+ local if_ip if_mask if_gw if_prefix
+ local if_metric if_dns1 if_dns2
+
+ net_path=/etc/sysconfig/network-scripts/
+ if [[ -z "$1" ]]; then
+ return 1
+ else
+ bridge=$1
+ fi
+
+ # if no interfaces attached then return
+ if ! ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*"; then
+ return 0
+ fi
+
+ # look for .orig ifcfg files to use
+ port_output=$(ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*")
+ while read -r line; do
+ if [ -z "$line" ]; then
+ continue
+ elif [ -e ${net_path}/ifcfg-${line}.orig ]; then
+ mv -f ${net_path}/ifcfg-${line}.orig ${net_path}/ifcfg-${line}
+ elif [ -e ${net_path}/ifcfg-${bridge} ]; then
+ if_ip=$(sed -n 's/^IPADDR=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+ if_mask=$(sed -n 's/^NETMASK=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+ if_gw=$(sed -n 's/^GATEWAY=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+ if_metric=$(sed -n 's/^METRIC=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+ if_dns1=$(sed -n 's/^DNS1=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+ if_dns2=$(sed -n 's/^DNS2=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+
+ if [ -z "$if_mask" ]; then
+ if_prefix=$(sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p' ${net_path}/ifcfg-${bridge})
+ if_mask=$(prefix2mask ${if_prefix})
+ fi
+
+ if [[ -z "$if_ip" || -z "$if_mask" ]]; then
+ echo "ERROR: IPADDR or PREFIX/NETMASK missing for ${bridge} and no .orig file for interface ${line}"
+ return 1
+ fi
+
+ # create if cfg
+ echo "DEVICE=${line}
+IPADDR=${if_ip}
+NETMASK=${if_mask}
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=Ethernet
+NM_CONTROLLED=no
+PEERDNS=no" > ${net_path}/ifcfg-${line}
+
+ if [ -n "$if_gw" ]; then
+ echo "GATEWAY=${if_gw}" >> ${net_path}/ifcfg-${line}
+ fi
+
+ if [ -n "$if_metric" ]; then
+ echo "METRIC=${if_metric}" >> ${net_path}/ifcfg-${line}
+ fi
+
+ if [[ -n "$if_dns1" || -n "$if_dns2" ]]; then
+ sed -i '/PEERDNS/c\PEERDNS=yes' ${net_path}/ifcfg-${line}
+
+ if [ -n "$if_dns1" ]; then
+ echo "DNS1=${if_dns1}" >> ${net_path}/ifcfg-${line}
+ fi
+
+ if [ -n "$if_dns2" ]; then
+ echo "DNS2=${if_dns2}" >> ${net_path}/ifcfg-${line}
+ fi
+ fi
+ break
+ else
+ echo "ERROR: Real interface ${line} attached to bridge, but no interface or ${bridge} ifcfg file exists"
+ return 1
+ fi
+
+ done <<< "$port_output"
+
+ # modify the bridge ifcfg file
+ # to remove IP params
+ sudo sed -i 's/IPADDR=.*//' ${net_path}/ifcfg-${bridge}
+ sudo sed -i 's/NETMASK=.*//' ${net_path}/ifcfg-${bridge}
+ sudo sed -i 's/GATEWAY=.*//' ${net_path}/ifcfg-${bridge}
+ sudo sed -i 's/DNS1=.*//' ${net_path}/ifcfg-${bridge}
+ sudo sed -i 's/DNS2=.*//' ${net_path}/ifcfg-${bridge}
+ sudo sed -i 's/METRIC=.*//' ${net_path}/ifcfg-${bridge}
+ sudo sed -i 's/PEERDNS=.*//' ${net_path}/ifcfg-${bridge}
+
+ sudo systemctl restart network
+}
display_usage() {
echo -e "Usage:\n$0 [arguments] \n"
@@ -47,7 +131,7 @@ display_usage() {
##params: $@ the entire command line is passed
##usage: parse_cmd_line() "$@"
parse_cmdline() {
- echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
+ echo -e "\n\n${blue}This script is used to clean an Apex environment${reset}\n\n"
echo "Use -h to display help"
sleep 2
@@ -79,7 +163,13 @@ parse_cmdline "$@"
if [ -n "$INVENTORY_FILE" ]; then
echo -e "${blue}INFO: Parsing inventory file...${reset}"
- if ! python3 -B $LIB/python/apex_python_utils.py clean -f ${INVENTORY_FILE}; then
+ # hack for now (until we switch fully over to clean.py) to tell if
+ # we should install apex from python or if rpm is being used
+ if ! rpm -q opnfv-apex-common > /dev/null; then
+ pushd ../ && python3 setup.py install > /dev/null
+ popd
+ fi
+ if ! python3 -m apex.clean -f ${INVENTORY_FILE}; then
echo -e "${red}WARN: Unable to shutdown all nodes! Please check /var/log/apex.log${reset}"
else
echo -e "${blue}INFO: Node shutdown complete...${reset}"
diff --git a/ci/deploy.sh b/ci/deploy.sh
index f1a807f7..0ba0c74b 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
##############################################################################
# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
#
@@ -12,245 +12,11 @@
# author: Dan Radez (dradez@redhat.com)
# author: Tim Rozet (trozet@redhat.com)
#
-# Based on RDO Manager http://www.rdoproject.org
set -e
-
-##VARIABLES
-reset=$(tput sgr0 || echo "")
-blue=$(tput setaf 4 || echo "")
-red=$(tput setaf 1 || echo "")
-green=$(tput setaf 2 || echo "")
-
-interactive="FALSE"
-ping_site="8.8.8.8"
-dnslookup_site="www.google.com"
-post_config="TRUE"
-debug="FALSE"
-
-ovs_rpm_name=openvswitch-2.6.1-1.el7.centos.x86_64.rpm
-ovs_kmod_rpm_name=openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm
-
-declare -i CNT
-declare UNDERCLOUD
-declare -A deploy_options_array
-declare -a performance_options
-declare -A NET_MAP
-
-APEX_TMP_DIR=$(python3 -c "import tempfile; print(tempfile.mkdtemp())")
-SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
-DEPLOY_OPTIONS=""
-BASE=${BASE:-'/var/opt/opnfv'}
-IMAGES=${IMAGES:-"$BASE/images"}
-LIB=${LIB:-"$BASE/lib"}
-OPNFV_NETWORK_TYPES="admin tenant external storage api"
-ENV_FILE="opnfv-environment.yaml"
-
-VM_CPUS=4
-VM_RAM=8
-VM_COMPUTES=1
-
-# Netmap used to map networks to OVS bridge names
-NET_MAP['admin']="br-admin"
-NET_MAP['tenant']="br-tenant"
-NET_MAP['external']="br-external"
-NET_MAP['storage']="br-storage"
-NET_MAP['api']="br-api"
-ext_net_type="interface"
-ip_address_family=4
-
-# Libraries
-lib_files=(
-$LIB/common-functions.sh
-$LIB/configure-deps-functions.sh
-$LIB/parse-functions.sh
-$LIB/virtual-setup-functions.sh
-$LIB/undercloud-functions.sh
-$LIB/overcloud-deploy-functions.sh
-$LIB/post-install-functions.sh
-$LIB/utility-functions.sh
-)
-for lib_file in ${lib_files[@]}; do
- if ! source $lib_file; then
- echo -e "${red}ERROR: Failed to source $lib_file${reset}"
- exit 1
- fi
-done
-
-display_usage() {
- echo -e "Usage:\n$0 [arguments] \n"
- echo -e " --deploy-settings | -d : Full path to deploy settings yaml file. Optional. Defaults to null"
- echo -e " --inventory | -i : Full path to inventory yaml file. Required only for baremetal"
- echo -e " --net-settings | -n : Full path to network settings file. Optional."
- echo -e " --ping-site | -p : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
- echo -e " --dnslookup-site : site to use to verify DNS resolution. Optional. Defaults to www.google.com"
- echo -e " --virtual | -v : Virtualize overcloud nodes instead of using baremetal."
- echo -e " --no-post-config : disable Post Install configuration."
- echo -e " --debug : enable debug output."
- echo -e " --interactive : enable interactive deployment mode which requires user to confirm steps of deployment."
- echo -e " --virtual-cpus : Number of CPUs to use per Overcloud VM in a virtual deployment (defaults to 4)."
- echo -e " --virtual-computes : Number of Virtual Compute nodes to create and use during deployment (defaults to 1 for noha and 2 for ha)."
- echo -e " --virtual-default-ram : Amount of default RAM to use per Overcloud VM in GB (defaults to 8)."
- echo -e " --virtual-compute-ram : Amount of RAM to use per Overcloud Compute VM in GB (defaults to 8). Overrides --virtual-default-ram arg for computes"
-}
-
-##translates the command line parameters into variables
-##params: $@ the entire command line is passed
-##usage: parse_cmd_line() "$@"
-parse_cmdline() {
- echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
- echo "Use -h to display help"
-
- while [ "${1:0:1}" = "-" ]
- do
- case "$1" in
- -h|--help)
- display_usage
- exit 0
- ;;
- -d|--deploy-settings)
- DEPLOY_SETTINGS_FILE=$2
- echo "Deployment Configuration file: $2"
- shift 2
- ;;
- -i|--inventory)
- INVENTORY_FILE=$2
- shift 2
- ;;
- -n|--net-settings)
- NETSETS=$2
- echo "Network Settings Configuration file: $2"
- shift 2
- ;;
- -e|--environment-file)
- ENV_FILE=$2
- echo "Base OOO Environment file: $2"
- shift 2
- ;;
- -p|--ping-site)
- ping_site=$2
- echo "Using $2 as the ping site"
- shift 2
- ;;
- --dnslookup-site)
- dnslookup_site=$2
- echo "Using $2 as the dnslookup site"
- shift 2
- ;;
- -v|--virtual)
- virtual="TRUE"
- echo "Executing a Virtual Deployment"
- shift 1
- ;;
- --no-post-config )
- post_config="FALSE"
- echo "Post install configuration disabled"
- shift 1
- ;;
- --debug )
- debug="TRUE"
- echo "Enable debug output"
- shift 1
- ;;
- --interactive )
- interactive="TRUE"
- echo "Interactive mode enabled"
- shift 1
- ;;
- --virtual-cpus )
- VM_CPUS=$2
- echo "Number of CPUs per VM set to $VM_CPUS"
- shift 2
- ;;
- --virtual-default-ram )
- VM_RAM=$2
- echo "Amount of Default RAM per VM set to $VM_RAM"
- shift 2
- ;;
- --virtual-computes )
- VM_COMPUTES=$2
- echo "Virtual Compute nodes set to $VM_COMPUTES"
- shift 2
- ;;
- --virtual-compute-ram )
- VM_COMPUTE_RAM=$2
- echo "Virtual Compute RAM set to $VM_COMPUTE_RAM"
- shift 2
- ;;
- *)
- display_usage
- exit 1
- ;;
- esac
- done
- sleep 2
-
- if [[ -z "$NETSETS" ]]; then
- echo -e "${red}ERROR: You must provide a network_settings file with -n.${reset}"
- exit 1
- fi
-
- # inventory file usage validation
- if [[ -n "$virtual" ]]; then
- if [[ -n "$INVENTORY_FILE" ]]; then
- echo -e "${red}ERROR: You should not specify an inventory file with virtual deployments${reset}"
- exit 1
- else
- INVENTORY_FILE="$APEX_TMP_DIR/inventory-virt.yaml"
- fi
- elif [[ -z "$INVENTORY_FILE" ]]; then
- echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
- exit 1
- elif [[ ! -f "$INVENTORY_FILE" ]]; then
- echo -e "{$red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
- exit 1
- fi
-
- if [[ -z "$DEPLOY_SETTINGS_FILE" || ! -f "$DEPLOY_SETTINGS_FILE" ]]; then
- echo -e "${red}ERROR: Deploy Settings: ${DEPLOY_SETTINGS_FILE} does not exist! Exiting...${reset}"
- exit 1
- fi
-
- if [[ ! -z "$NETSETS" && ! -f "$NETSETS" ]]; then
- echo -e "${red}ERROR: Network Settings: ${NETSETS} does not exist! Exiting...${reset}"
- exit 1
- fi
-
-}
-
-main() {
- parse_cmdline "$@"
- if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
- echo -e "${blue}INFO: Parsing deploy settings file...${reset}"
- parse_deploy_settings
- fi
- echo -e "${blue}INFO: Parsing network settings file...${reset}"
- parse_network_settings
- if ! configure_deps; then
- echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
- exit 1
- fi
- #Correct the time on the server prior to launching any VMs
- if ntpdate $ntp_server; then
- hwclock --systohc
- else
- echo "${blue}WARNING: ntpdate failed to update the time on the server. ${reset}"
- fi
- setup_undercloud_vm
- if [ "$virtual" == "TRUE" ]; then
- setup_virtual_baremetal $VM_CPUS $VM_RAM
- fi
- parse_inventory_file
- configure_undercloud
- overcloud_deploy
- if [ "$post_config" == "TRUE" ]; then
- if ! configure_post_install; then
- echo -e "${red}ERROR:Post Install Configuration Failed, Exiting.${reset}"
- exit 1
- else
- echo -e "${blue}INFO: Post Install Configuration Complete${reset}"
- fi
- fi
-}
-
-main "$@"
+yum -y install python34 python34-devel libvirt-devel python34-pip python-tox ansible
+mkdir -p /home/jenkins-ci/tmp
+mv -f .build /home/jenkins-ci/tmp/
+pip3 install --upgrade --force-reinstall .
+mv -f /home/jenkins-ci/tmp/.build .
+opnfv-deploy $@
diff --git a/ci/run_smoke_tests.sh b/ci/run_smoke_tests.sh
index 7cbd390d..517822ef 100755
--- a/ci/run_smoke_tests.sh
+++ b/ci/run_smoke_tests.sh
@@ -1,7 +1,5 @@
#!/usr/bin/env bash
-source ../lib/utility-functions.sh
-
export ANSIBLE_HOST_KEY_CHECKING=False
./dev_dep_check.sh
diff --git a/ci/util.sh b/ci/util.sh
index 1a931d0b..a9df0213 100755
--- a/ci/util.sh
+++ b/ci/util.sh
@@ -2,12 +2,88 @@
# Utility script used to interact with a deployment
# @author Tim Rozet (trozet@redhat.com)
-BASE=${BASE:-'/var/opt/opnfv'}
-IMAGES=${IMAGES:-"$BASE/images"}
-LIB=${LIB:-"$BASE/lib"}
VALID_CMDS="undercloud overcloud opendaylight debug-stack mock-detached -h --help"
+SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
-source $LIB/utility-functions.sh
+##connects to undercloud
+##params: user to login with, command to execute on undercloud (optional)
+function undercloud_connect {
+ local user=$1
+
+ if [ -z "$1" ]; then
+ echo "Missing required argument: user to login as to undercloud"
+ return 1
+ fi
+
+ if [ -z "$2" ]; then
+ ssh ${SSH_OPTIONS[@]} ${user}@$(get_undercloud_ip)
+ else
+ ssh ${SSH_OPTIONS[@]} -T ${user}@$(get_undercloud_ip) "$2"
+ fi
+}
+
+##outputs the Undercloud's IP address
+##params: none
+function get_undercloud_ip {
+ echo $(arp -an | grep $(virsh domiflist undercloud | grep default |\
+ awk '{print $5}') | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+}
+
+##connects to overcloud nodes
+##params: node to login to, command to execute on overcloud (optional)
+function overcloud_connect {
+ local node
+ local node_output
+ local node_ip
+
+ if [ -z "$1" ]; then
+ echo "Missing required argument: overcloud node to login to"
+ return 1
+ elif ! echo "$1" | grep -E "(controller|compute)[0-9]+" > /dev/null; then
+ echo "Invalid argument: overcloud node to login to must be in the format: \
+controller<number> or compute<number>"
+ return 1
+ fi
+
+ node_output=$(undercloud_connect "stack" "source stackrc; nova list")
+ node=$(echo "$1" | sed -E 's/([a-zA-Z]+)([0-9]+)/\1-\2/')
+
+ node_ip=$(echo "$node_output" | grep "$node" | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+
+ if [ "$node_ip" == "" ]; then
+ echo -e "Unable to find IP for ${node} in \n${node_output}"
+ return 1
+ fi
+
+ if [ -z "$2" ]; then
+ ssh ${SSH_OPTIONS[@]} heat-admin@${node_ip}
+ else
+ ssh ${SSH_OPTIONS[@]} -T heat-admin@${node_ip} "$2"
+ fi
+}
+
+##connects to opendaylight karaf console
+##params: None
+function opendaylight_connect {
+ local opendaylight_ip
+ opendaylight_ip=$(undercloud_connect "stack" "cat overcloudrc | grep SDN_CONTROLLER_IP | grep -Eo [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+
+ if [ "$opendaylight_ip" == "" ]; then
+ echo -e "Unable to find IP for OpenDaylight in overcloudrc"
+ return 1
+ else
+ echo -e "Connecting to ODL Karaf console. Default password is 'karaf'"
+ fi
+
+ ssh -p 8101 ${SSH_OPTIONS[@]} karaf@${opendaylight_ip}
+}
+
+##outputs heat stack deployment failures
+##params: none
+function debug_stack {
+ source ~/stackrc
+ openstack stack failures list overcloud --long
+}
resolve_cmd() {
local given=$1