Diffstat (limited to 'lib')
-rwxr-xr-x  lib/overcloud-deploy-functions.sh    | 26
-rwxr-xr-x  lib/parse-functions.sh               | 10
-rwxr-xr-x  lib/post-install-functions.sh        |  4
-rw-r--r--  lib/python/apex/common/constants.py  |  1
-rw-r--r--  lib/python/apex/common/utils.py      |  8
-rw-r--r--  lib/python/apex/deploy_settings.py   | 11
-rw-r--r--  lib/python/apex/inventory.py         | 27
-rw-r--r--  lib/python/apex/network_settings.py  |  8
-rwxr-xr-x  lib/python/apex_python_utils.py      | 11
-rwxr-xr-x  lib/undercloud-functions.sh          | 10
-rw-r--r--  lib/utility-functions.sh             | 22
11 files changed, 84 insertions(+), 54 deletions(-)
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
index 169640fe..980478cb 100755
--- a/lib/overcloud-deploy-functions.sh
+++ b/lib/overcloud-deploy-functions.sh
@@ -55,15 +55,15 @@ function overcloud_deploy {
# Make sure the correct overcloud image is available
- if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
- echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
+ if [ ! -f $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
+ echo "${red} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
exit 1
fi
echo "Copying overcloud image to Undercloud"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
- scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
+ scp ${SSH_OPTIONS[@]} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
# Install ovs-dpdk inside the overcloud image if it is enabled.
if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' || "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
@@ -130,9 +130,11 @@ EOI
# Set ODL version accordingly
if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && -n "${deploy_options_array['odl_version']}" ]]; then
case "${deploy_options_array['odl_version']}" in
+ beryllium) odl_version=''
+ ;;
boron) odl_version='boron'
;;
- cabron) odl_version='master'
+ carbon) odl_version='master'
;;
*) echo -e "${red}Invalid ODL version ${deploy_options_array['odl_version']}. Please use 'carbon' or 'boron' values.${reset}"
exit 1
@@ -287,11 +289,19 @@ openstack overcloud image upload
echo "Configuring undercloud and discovering nodes"
openstack baremetal import --json instackenv.json
-openstack baremetal configure boot
+
bash -x set_perf_images.sh ${performance_roles[@]}
-#if [[ -z "$virtual" ]]; then
-# openstack baremetal introspection bulk start
-#fi
+if [[ -z "$virtual" ]]; then
+ openstack baremetal introspection bulk start
+ if [[ -n "$root_disk_list" ]]; then
+ openstack baremetal configure boot --root-device=${root_disk_list}
+ else
+ openstack baremetal configure boot
+ fi
+else
+ openstack baremetal configure boot
+fi
+
echo "Configuring flavors"
for flavor in baremetal control compute; do
echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
diff --git a/lib/parse-functions.sh b/lib/parse-functions.sh
index 84da75c5..94eac01a 100755
--- a/lib/parse-functions.sh
+++ b/lib/parse-functions.sh
@@ -25,7 +25,7 @@ parse_network_settings() {
done
fi
- if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $CONFIG/network-environment.yaml $parse_ext); then
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $BASE/network-environment.yaml $parse_ext); then
echo -e "${blue}${output}${reset}"
eval "$output"
else
@@ -59,6 +59,7 @@ parse_deploy_settings() {
##params: none
##usage: parse_inventory_file
parse_inventory_file() {
+ local output
if [ "$virtual" == "TRUE" ]; then inv_virt="--virtual"; fi
if [[ "$ha_enabled" == "True" ]]; then inv_ha="--ha"; fi
instackenv_output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha)
@@ -69,5 +70,12 @@ cat > instackenv.json << EOF
$instackenv_output
EOF
EOI
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha --export-bash); then
+ echo -e "${blue}${output}${reset}"
+ eval "$output"
+ else
+ echo -e "${red}ERROR: Failed to parse inventory bash settings file ${INVENTORY_FILE}${reset}"
+ exit 1
+ fi
}
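Note: the new --export-bash call emits plain KEY=value lines meant to be eval'd, mirroring how parse_network_settings consumes its output above. A minimal sketch of that contract, with an illustrative inventory path; the only variable currently exported is root_disk_list (see inventory.py below):

    # Hypothetical standalone run outside parse_inventory_file
    if output=$(python3 -B lib/python/apex_python_utils.py parse-inventory \
                  -f inventory.yaml --ha --export-bash); then
      eval "$output"                     # e.g. evaluates: root_disk_list=sdb
      echo "Root device hint: ${root_disk_list}"
    else
      echo "ERROR: Failed to parse inventory" >&2
      exit 1
    fi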
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
index 102b86f5..eab17407 100755
--- a/lib/post-install-functions.sh
+++ b/lib/post-install-functions.sh
@@ -123,7 +123,9 @@ if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_a
done
fi
-if [ "${deploy_options_array['congress']}" == 'True' ]; then
+# TODO: Change this back to True once everything is back in
+# place with tht and puppet-congress for deployment
+if [ "${deploy_options_array['congress']}" == 'NeverTrue' ]; then
ds_configs="--config username=\$OS_USERNAME
--config tenant_name=\$OS_TENANT_NAME
--config password=\$OS_PASSWORD
diff --git a/lib/python/apex/common/constants.py b/lib/python/apex/common/constants.py
index 741bb4f8..3aa28eab 100644
--- a/lib/python/apex/common/constants.py
+++ b/lib/python/apex/common/constants.py
@@ -27,3 +27,4 @@ COMPUTE_PRE = "OS::TripleO::ComputeExtraConfigPre"
CONTROLLER_PRE = "OS::TripleO::ControllerExtraConfigPre"
PRE_CONFIG_DIR = "/usr/share/openstack-tripleo-heat-templates/puppet/" \
"extraconfig/pre_deploy/"
+DEFAULT_ROOT_DEV = 'sda'
diff --git a/lib/python/apex/common/utils.py b/lib/python/apex/common/utils.py
index d623638c..8e6896fa 100644
--- a/lib/python/apex/common/utils.py
+++ b/lib/python/apex/common/utils.py
@@ -21,3 +21,11 @@ def parse_yaml(yaml_file):
with open(yaml_file) as f:
parsed_dict = yaml.safe_load(f)
return parsed_dict
+
+
+def write_str(bash_str, path=None):
+ if path:
+ with open(path, 'w') as file:
+ file.write(bash_str)
+ else:
+ print(bash_str)
diff --git a/lib/python/apex/deploy_settings.py b/lib/python/apex/deploy_settings.py
index 3583646b..3133d7f8 100644
--- a/lib/python/apex/deploy_settings.py
+++ b/lib/python/apex/deploy_settings.py
@@ -11,6 +11,8 @@
import yaml
import logging
+from .common import utils
+
REQ_DEPLOY_SETTINGS = ['sdn_controller',
'odl_version',
'sdn_l3',
@@ -82,6 +84,8 @@ class DeploySettings(dict):
if req_set not in deploy_options:
if req_set == 'dataplane':
self['deploy_options'][req_set] = 'ovs'
+ elif req_set == 'ceph':
+ self['deploy_options'][req_set] = True
else:
self['deploy_options'][req_set] = False
@@ -163,12 +167,7 @@ class DeploySettings(dict):
if 'performance' in self['deploy_options']:
bash_str += self._dump_performance()
bash_str += self._dump_deploy_options_array()
-
- if path:
- with open(path, 'w') as file:
- file.write(bash_str)
- else:
- print(bash_str)
+ utils.write_str(bash_str, path)
class DeploySettingsException(Exception):
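Note: with the hunk above, a deploy settings file that omits 'ceph' now defaults it to True, while 'dataplane' still falls back to 'ovs' and the remaining required settings default to False. A rough sketch of how that surfaces on the bash side; $output is assumed here to hold the dump produced by DeploySettings.dump_bash(), consumed the same way the other lib scripts consume it:

    # Assumption: $output contains the eval-able dump from DeploySettings.dump_bash()
    eval "$output"
    echo "${deploy_options_array['ceph']}"   # expected: True when 'ceph' was omitted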
diff --git a/lib/python/apex/inventory.py b/lib/python/apex/inventory.py
index 711eb18f..ce16ef41 100644
--- a/lib/python/apex/inventory.py
+++ b/lib/python/apex/inventory.py
@@ -10,18 +10,22 @@
import yaml
import json
+from .common import constants
+from .common import utils
+
class Inventory(dict):
"""
This class parses an APEX inventory yaml file into an object. It
generates or detects all missing fields for deployment.
- It then collapses one level of identifcation from the object to
+ It then collapses one level of identification from the object to
convert it to a structure that can be dumped into a json file formatted
such that Triple-O can read the resulting json as an instackenv.json file.
"""
def __init__(self, source, ha=True, virtual=False):
init_dict = {}
+ self.root_device = constants.DEFAULT_ROOT_DEV
if isinstance(source, str):
with open(source, 'r') as inventory_file:
yaml_dict = yaml.safe_load(inventory_file)
@@ -40,8 +44,13 @@ class Inventory(dict):
node['pm_user'] = node['ipmi_user']
node['mac'] = [node['mac_address']]
- for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address'):
- del i
+ for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
+ 'disk_device'):
+ if i == 'disk_device' and 'disk_device' in node.keys():
+ self.root_device = node[i]
+ else:
+ continue
+ del node[i]
return node
@@ -53,7 +62,7 @@ class Inventory(dict):
'nodes for HA baremetal deployment')
elif len(self['nodes']) < 2:
raise InventoryException('You must provide at least 2 nodes '
- 'for non-HA baremetal deployment${reset}')
+ 'for non-HA baremetal deployment')
if virtual:
self['arch'] = 'x86_64'
@@ -67,6 +76,16 @@ class Inventory(dict):
def dump_instackenv_json(self):
print(json.dumps(dict(self), sort_keys=True, indent=4))
+ def dump_bash(self, path=None):
+ """
+ Prints settings for bash consumption.
+
+ If optional path is provided, bash string will be written to the file
+ instead of stdout.
+ """
+ bash_str = "{}={}\n".format('root_disk_list', str(self.root_device))
+ utils.write_str(bash_str, path)
+
class InventoryException(Exception):
def __init__(self, value):
diff --git a/lib/python/apex/network_settings.py b/lib/python/apex/network_settings.py
index 64065ca7..b04f141a 100644
--- a/lib/python/apex/network_settings.py
+++ b/lib/python/apex/network_settings.py
@@ -12,7 +12,7 @@ import logging
import ipaddress
from copy import copy
-
+from .common import utils
from . import ip_utils
from .common.constants import (
CONTROLLER,
@@ -338,11 +338,7 @@ class NetworkSettings(dict):
bash_str += flatten('dns_servers', self['dns_servers'], ' ')
bash_str += flatten('domain_name', self['dns-domain'], ' ')
bash_str += flatten('ntp_server', self['ntp_servers'][0], ' ')
- if path:
- with open(path, 'w') as file:
- file.write(bash_str)
- else:
- print(bash_str)
+ utils.write_str(bash_str, path)
def get_ip_addr_family(self,):
"""
diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py
index b0ebb270..e21d0464 100755
--- a/lib/python/apex_python_utils.py
+++ b/lib/python/apex_python_utils.py
@@ -22,7 +22,6 @@ from apex import NetworkEnvironment
from apex import DeploySettings
from apex import Inventory
from apex import ip_utils
-from apex.common.constants import ADMIN_NETWORK
def parse_net_settings(args):
@@ -66,7 +65,10 @@ def run_clean(args):
def parse_inventory(args):
inventory = Inventory(args.file, ha=args.ha, virtual=args.virtual)
- inventory.dump_instackenv_json()
+ if args.export_bash is True:
+ inventory.dump_bash()
+ else:
+ inventory.dump_instackenv_json()
def find_ip(args):
@@ -200,6 +202,11 @@ def get_parser():
default=False,
action='store_true',
help='Indicate if deployment inventory is virtual')
+ inventory.add_argument('--export-bash',
+ default=False,
+ dest='export_bash',
+ action='store_true',
+ help='Export bash variables from inventory')
inventory.set_defaults(func=parse_inventory)
clean = subparsers.add_parser('clean',
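Note: --export-bash only switches parse-inventory between its two renderings of the same inventory. Roughly, with flags taken from this diff and an illustrative inventory path:

    # Default rendering: instackenv.json content on stdout (dump_instackenv_json)
    python3 -B lib/python/apex_python_utils.py parse-inventory -f inventory.yaml --ha
    # Bash rendering: a single assignment from dump_bash, e.g.
    #   root_disk_list=sda   (DEFAULT_ROOT_DEV unless a node sets disk_device)
    python3 -B lib/python/apex_python_utils.py parse-inventory -f inventory.yaml --ha --export-bash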
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
index 6f7addbd..080fcbbd 100755
--- a/lib/undercloud-functions.sh
+++ b/lib/undercloud-functions.sh
@@ -19,7 +19,7 @@ function setup_undercloud_vm {
define_vm undercloud hd 30 "$undercloud_nets" 4 12288
### this doesn't work for some reason I was getting hangup events so using cp instead
- #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
+ #virsh vol-upload --pool default --vol undercloud.qcow2 --file $BASE/stack/undercloud.qcow2
#2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
#2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
#2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
@@ -28,14 +28,14 @@ function setup_undercloud_vm {
#error: Reconnected to the hypervisor
local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
- cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst
+ cp -f $IMAGES/undercloud.qcow2 $undercloud_dst
# resize Undercloud machine
echo "Checking if Undercloud needs to be resized..."
undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$undercloud_size" -lt 30 ]; then
qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
- LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
+ LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $IMAGES/undercloud.qcow2 $undercloud_dst
LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$new_size" -lt 30 ]; then
@@ -136,12 +136,12 @@ function configure_undercloud {
ovs_dpdk_bridge=''
fi
- if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e "br-ex"); then
+ if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e "br-ex"); then
echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
exit 1
fi
- if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
+ if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
exit 1
fi
diff --git a/lib/utility-functions.sh b/lib/utility-functions.sh
index 5c28b46c..c12619ae 100644
--- a/lib/utility-functions.sh
+++ b/lib/utility-functions.sh
@@ -80,26 +80,6 @@ function opendaylight_connect {
##outputs heat stack deployment failures
##params: none
function debug_stack {
- local failure_output
- local phys_id
- declare -a resource_arr
- declare -a phys_id_arr
-
source ~/stackrc
-
- IFS=$'\n'
- for resource in $(openstack stack resource list -n 5 overcloud | grep FAILED); do
- unset IFS
- resource_arr=(${resource//|/ })
- phys_id=$(openstack stack resource show ${resource_arr[-1]} ${resource_arr[0]} | grep physical_resource_id 2> /dev/null)
- if [ -n "$phys_id" ]; then
- phys_id_arr=(${phys_id//|/ })
- failure_output+="******************************************************"
- failure_output+="\n${resource}:\n\n$(openstack stack deployment show ${phys_id_arr[-1]} 2> /dev/null)"
- failure_output+="\n******************************************************"
- fi
- unset phys_id
- done
-
- echo -e $failure_output
+ openstack stack failures list overcloud --long
}
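Note: debug_stack now delegates the failure walk that the deleted loop performed to Heat itself. The equivalent manual invocation, run as the stack user on the undercloud, is simply:

    source ~/stackrc
    openstack stack failures list overcloud --long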