Diffstat (limited to 'lib')
-rw-r--r--  lib/common-functions.sh                  |  2
-rwxr-xr-x  lib/configure-deps-functions.sh          |  2
-rwxr-xr-x  lib/overcloud-deploy-functions.sh        | 56
-rwxr-xr-x  lib/parse-functions.sh                   | 10
-rwxr-xr-x  lib/post-install-functions.sh            | 37
-rw-r--r--  lib/python/apex/common/constants.py      |  1
-rw-r--r--  lib/python/apex/common/utils.py          |  8
-rw-r--r--  lib/python/apex/deploy_settings.py       | 18
-rw-r--r--  lib/python/apex/inventory.py             | 33
-rw-r--r--  lib/python/apex/network_environment.py   | 47
-rw-r--r--  lib/python/apex/network_settings.py      | 38
-rwxr-xr-x  lib/python/apex_python_utils.py          | 11
-rwxr-xr-x  lib/undercloud-functions.sh              | 20
-rw-r--r--  lib/utility-functions.sh                 | 24
14 files changed, 185 insertions, 122 deletions
diff --git a/lib/common-functions.sh b/lib/common-functions.sh
index 2d113450..709dbf97 100644
--- a/lib/common-functions.sh
+++ b/lib/common-functions.sh
@@ -281,7 +281,7 @@ contains_prefix() {
#params: none
function verify_internet {
if ping -c 2 $ping_site > /dev/null; then
- if ping -c 2 www.google.com > /dev/null; then
+ if ping -c 2 $dnslookup_site > /dev/null; then
echo "${blue}Internet connectivity detected${reset}"
return 0
else
diff --git a/lib/configure-deps-functions.sh b/lib/configure-deps-functions.sh
index 1d238f87..b9799969 100755
--- a/lib/configure-deps-functions.sh
+++ b/lib/configure-deps-functions.sh
@@ -75,7 +75,7 @@ EOF
if [[ "$network" != "admin" && "$network" != "external" ]]; then
continue
fi
- this_interface=$(eval echo \${${network}_bridged_interface})
+ this_interface=$(eval echo \${${network}_installer_vm_members})
# check if this a bridged interface for this network
if [[ ! -z "$this_interface" || "$this_interface" != "none" ]]; then
if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
index 4b592b46..980478cb 100755
--- a/lib/overcloud-deploy-functions.sh
+++ b/lib/overcloud-deploy-functions.sh
@@ -55,15 +55,15 @@ function overcloud_deploy {
# Make sure the correct overcloud image is available
- if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
- echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
+ if [ ! -f $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
+ echo "${red} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
exit 1
fi
echo "Copying overcloud image to Undercloud"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
- scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
+ scp ${SSH_OPTIONS[@]} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
# Install ovs-dpdk inside the overcloud image if it is enabled.
if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' || "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
@@ -128,10 +128,22 @@ EOI
fi
# Set ODL version accordingly
- if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['odl_version']}" == 'boron' ]]; then
+ if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && -n "${deploy_options_array['odl_version']}" ]]; then
+ case "${deploy_options_array['odl_version']}" in
+ beryllium) odl_version=''
+ ;;
+ boron) odl_version='boron'
+ ;;
+ carbon) odl_version='master'
+ ;;
+ *) echo -e "${red}Invalid ODL version ${deploy_options_array['odl_version']}. Please use 'carbon' or 'boron' values.${reset}"
+ exit 1
+ ;;
+ esac
+
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
- --run-command "yum -y install /root/boron/*" \
+ --run-command "yum -y install /root/${odl_version}/*" \
-a overcloud-full.qcow2
EOI
fi
@@ -204,8 +216,10 @@ EOI
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
fi
- # make sure ceph is installed
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
+ # check if ceph should be enabled
+ if [ "${deploy_options_array['ceph']}" == 'True' ]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
+ fi
#DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
DEPLOY_OPTIONS+=" -e network-environment.yaml"
@@ -247,22 +261,22 @@ EOI
DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
fi
- DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"
+ DEPLOY_OPTIONS+=" -e ${ENV_FILE}"
echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
if [ "${deploy_options_array['tacker']}" == 'False' ]; then
- sed -i '/EnableTacker:/c\ EnableTacker: false' opnfv-environment.yaml
+ sed -i '/EnableTacker:/c\ EnableTacker: false' ${ENV_FILE}
fi
# Create a key for use by nova for live migration
echo "Creating nova SSH key for nova resize support"
ssh-keygen -f nova_id_rsa -b 1024 -P ""
public_key=\'\$(cat nova_id_rsa.pub | cut -d ' ' -f 2)\'
-sed -i "s#replace_public_key:#key: \$public_key#g" opnfv-environment.yaml
-python -c 'open("opnfv-environment-new.yaml", "w").write((open("opnfv-environment.yaml").read().replace("replace_private_key:", "key: \"" + "".join(open("nova_id_rsa").readlines()).replace("\\n","\\\n") + "\"")))'
-mv -f opnfv-environment-new.yaml opnfv-environment.yaml
+sed -i "s#replace_public_key:#key: \$public_key#g" ${ENV_FILE}
+python -c 'open("opnfv-environment-new.yaml", "w").write((open("${ENV_FILE}").read().replace("replace_private_key:", "key: \"" + "".join(open("nova_id_rsa").readlines()).replace("\\n","\\\n") + "\"")))'
+mv -f opnfv-environment-new.yaml ${ENV_FILE}
source stackrc
set -o errexit
@@ -275,11 +289,19 @@ openstack overcloud image upload
echo "Configuring undercloud and discovering nodes"
openstack baremetal import --json instackenv.json
-openstack baremetal configure boot
+
bash -x set_perf_images.sh ${performance_roles[@]}
-#if [[ -z "$virtual" ]]; then
-# openstack baremetal introspection bulk start
-#fi
+if [[ -z "$virtual" ]]; then
+ openstack baremetal introspection bulk start
+ if [[ -n "$root_disk_list" ]]; then
+ openstack baremetal configure boot --root-device=${root_disk_list}
+ else
+ openstack baremetal configure boot
+ fi
+else
+ openstack baremetal configure boot
+fi
+
echo "Configuring flavors"
for flavor in baremetal control compute; do
echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
@@ -300,7 +322,7 @@ for dns_server in ${dns_servers}; do
dns_server_ext="\${dns_server_ext} --dns-nameserver \${dns_server}"
done
neutron subnet-update \$(neutron subnet-list | grep -Ev "id|tenant|external|storage" | grep -v \\\\-\\\\- | awk {'print \$2'}) \${dns_server_ext}
-sed -i '/CloudDomain:/c\ CloudDomain: '${domain_name} opnfv-environment.yaml
+sed -i '/CloudDomain:/c\ CloudDomain: '${domain_name} ${ENV_FILE}
echo "Executing overcloud deployment, this should run for an extended period without output."
sleep 60 #wait for Hypervisor stats to check-in to nova
# save deploy command so it can be used for debugging
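Note: the new case statement maps the user-facing odl_version deploy setting onto the package directory installed from /root inside the overcloud image (beryllium maps to an empty value, carbon to 'master'). A dict-based restatement of that mapping, for illustration only (ODL_DIR_MAP and odl_install_dir are hypothetical names, not part of this change):

    # Hypothetical Python restatement of the bash case statement above.
    ODL_DIR_MAP = {'beryllium': '', 'boron': 'boron', 'carbon': 'master'}

    def odl_install_dir(odl_version):
        try:
            return ODL_DIR_MAP[odl_version]   # directory under /root in the image
        except KeyError:
            raise ValueError("Invalid ODL version {}".format(odl_version))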
diff --git a/lib/parse-functions.sh b/lib/parse-functions.sh
index 84da75c5..94eac01a 100755
--- a/lib/parse-functions.sh
+++ b/lib/parse-functions.sh
@@ -25,7 +25,7 @@ parse_network_settings() {
done
fi
- if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $CONFIG/network-environment.yaml $parse_ext); then
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $BASE/network-environment.yaml $parse_ext); then
echo -e "${blue}${output}${reset}"
eval "$output"
else
@@ -59,6 +59,7 @@ parse_deploy_settings() {
##params: none
##usage: parse_inventory_file
parse_inventory_file() {
+ local output
if [ "$virtual" == "TRUE" ]; then inv_virt="--virtual"; fi
if [[ "$ha_enabled" == "True" ]]; then inv_ha="--ha"; fi
instackenv_output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha)
@@ -69,5 +70,12 @@ cat > instackenv.json << EOF
$instackenv_output
EOF
EOI
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha --export-bash); then
+ echo -e "${blue}${output}${reset}"
+ eval "$output"
+ else
+ echo -e "${red}ERROR: Failed to parse inventory bash settings file ${INVENTORY_FILE}${reset}"
+ exit 1
+ fi
}
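Note: the added apex_python_utils.py call requests bash-consumable output (via the --export-bash flag introduced later in this diff) and evals it; that is how root_disk_list reaches the "openstack baremetal configure boot" step in overcloud-deploy-functions.sh. A sketch of the same round trip driven from Python, assuming a hypothetical inventory file name:

    import subprocess

    # Mirrors the call made in parse_inventory_file; 'inventory.yaml' is illustrative.
    out = subprocess.check_output(
        ['python3', '-B', 'lib/python/apex_python_utils.py', 'parse-inventory',
         '-f', 'inventory.yaml', '--export-bash'], universal_newlines=True)
    print(out)   # e.g. "root_disk_list=sda", ready for the shell to eval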
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
index 604eb70d..51287c4b 100755
--- a/lib/post-install-functions.sh
+++ b/lib/post-install-functions.sh
@@ -13,7 +13,10 @@
function configure_post_install {
local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip af external_network_ipv6
external_network_ipv6=False
- opnfv_attach_networks="admin external"
+ opnfv_attach_networks="admin"
+ if [[ $enabled_network_list =~ "external" ]]; then
+ opnfv_attach_networks+=' external'
+ fi
echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
@@ -39,7 +42,7 @@ EOI
else
echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
# use last IP of allocation pool
- eval "ip_range=\${${network}_usable_ip_range}"
+ eval "ip_range=\${${network}_overcloud_ip_range}"
ovs_ip=${ip_range##*,}
eval "net_cidr=\${${network}_cidr}"
if [[ $ovs_ip =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
@@ -90,25 +93,28 @@ echo "Configuring Neutron external network"
if [[ -n "$external_nic_mapping_compute_vlan" && "$external_nic_mapping_compute_vlan" != 'native' ]]; then
neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type vlan --provider:segmentation_id ${external_nic_mapping_compute_vlan} --provider:physical_network datacentre
else
- neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }')
+ neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type flat --provider:physical_network datacentre
fi
if [ "$external_network_ipv6" == "True" ]; then
neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') external --ip_version 6 --ipv6_ra_mode slaac --ipv6_address_mode slaac --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
-else
+elif [[ "$enabled_network_list" =~ "external" ]]; then
neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
+else
+ # we re-use the introspection range for floating ips with single admin network
+ neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${admin_gateway} --allocation-pool start=${admin_introspection_range%%,*},end=${admin_introspection_range##*,} ${admin_cidr}
fi
echo "Removing sahara endpoint and service"
sahara_service_id=\$(openstack service list | grep sahara | cut -d ' ' -f 2)
sahara_endpoint_id=\$(openstack endpoint list | grep sahara | cut -d ' ' -f 2)
-openstack endpoint delete \$sahara_endpoint_id
-openstack service delete \$sahara_service_id
+[[ -n "\$sahara_endpoint_id" ]] && openstack endpoint delete \$sahara_endpoint_id
+[[ -n "\$sahara_service_id" ]] && openstack service delete \$sahara_service_id
echo "Removing swift endpoint and service"
swift_service_id=\$(openstack service list | grep swift | cut -d ' ' -f 2)
swift_endpoint_id=\$(openstack endpoint list | grep swift | cut -d ' ' -f 2)
-openstack endpoint delete \$swift_endpoint_id
-openstack service delete \$swift_service_id
+[[ -n "\$swift_endpoint_id" ]] && openstack endpoint delete \$swift_endpoint_id
+[[ -n "\$swift_service_id" ]] && openstack service delete \$swift_service_id
if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
for flavor in \$(openstack flavor list -c Name -f value); do
@@ -117,7 +123,9 @@ if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_a
done
fi
-if [ "${deploy_options_array['congress']}" == 'True' ]; then
+# TODO: Change this back to True once everything is back in
+# place with tht and puppet-congress for deployment
+if [ "${deploy_options_array['congress']}" == 'NeverTrue' ]; then
ds_configs="--config username=\$OS_USERNAME
--config tenant_name=\$OS_TENANT_NAME
--config password=\$OS_PASSWORD
@@ -145,8 +153,13 @@ EOI
# for virtual, we NAT external network through Undercloud
# same goes for baremetal if only jumphost has external connectivity
if [ "$virtual" == "TRUE" ] || ! test_overcloud_connectivity && [ "$external_network_ipv6" != "True" ]; then
- if ! configure_undercloud_nat ${external_cidr}; then
- echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${external_cidr}${reset}"
+ if [[ "$enabled_network_list" =~ "external" ]]; then
+ nat_cidr=${external_cidr}
+ else
+ nat_cidr=${admin_cidr}
+ fi
+ if ! configure_undercloud_nat ${nat_cidr}; then
+ echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${nat_cidr}${reset}"
exit 1
else
echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud external network${reset}"
@@ -208,7 +221,7 @@ done
# Print out the undercloud IP and dashboard URL
source stackrc
echo "Undercloud IP: $UNDERCLOUD, please connect by doing 'opnfv-util undercloud'"
-echo "Overcloud dashboard available at http://\$(openstack stack output show overcloud PublicVip | sed 's/"//g')/dashboard"
+echo "Overcloud dashboard available at http://\$(openstack stack output show overcloud PublicVip -f json | jq -r .output_value)/dashboard"
EOI
if [[ "$ha_enabled" == 'True' ]]; then
diff --git a/lib/python/apex/common/constants.py b/lib/python/apex/common/constants.py
index 741bb4f8..3aa28eab 100644
--- a/lib/python/apex/common/constants.py
+++ b/lib/python/apex/common/constants.py
@@ -27,3 +27,4 @@ COMPUTE_PRE = "OS::TripleO::ComputeExtraConfigPre"
CONTROLLER_PRE = "OS::TripleO::ControllerExtraConfigPre"
PRE_CONFIG_DIR = "/usr/share/openstack-tripleo-heat-templates/puppet/" \
"extraconfig/pre_deploy/"
+DEFAULT_ROOT_DEV = 'sda'
diff --git a/lib/python/apex/common/utils.py b/lib/python/apex/common/utils.py
index d623638c..8e6896fa 100644
--- a/lib/python/apex/common/utils.py
+++ b/lib/python/apex/common/utils.py
@@ -21,3 +21,11 @@ def parse_yaml(yaml_file):
with open(yaml_file) as f:
parsed_dict = yaml.safe_load(f)
return parsed_dict
+
+
+def write_str(bash_str, path=None):
+ if path:
+ with open(path, 'w') as file:
+ file.write(bash_str)
+ else:
+ print(bash_str)
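Note: write_str centralizes the print-or-write pattern that deploy_settings.py and network_settings.py previously duplicated in their dump_bash methods. A quick usage sketch (the path is illustrative):

    from apex.common import utils

    utils.write_str("root_disk_list=sda\n")                  # no path: print to stdout
    utils.write_str("root_disk_list=sda\n", "/tmp/inv.sh")   # path given: write the file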
diff --git a/lib/python/apex/deploy_settings.py b/lib/python/apex/deploy_settings.py
index 5490c6e9..3133d7f8 100644
--- a/lib/python/apex/deploy_settings.py
+++ b/lib/python/apex/deploy_settings.py
@@ -11,6 +11,8 @@
import yaml
import logging
+from .common import utils
+
REQ_DEPLOY_SETTINGS = ['sdn_controller',
'odl_version',
'sdn_l3',
@@ -19,9 +21,10 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
'dataplane',
'sfc',
'vpn',
- 'vpp']
+ 'vpp',
+ 'ceph']
-OPT_DEPLOY_SETTINGS = ['performance', 'vsperf']
+OPT_DEPLOY_SETTINGS = ['performance', 'vsperf', 'ceph_device']
VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
VALID_PERF_OPTS = ['kernel', 'nova', 'vpp']
@@ -38,7 +41,7 @@ class DeploySettings(dict):
"""
def __init__(self, filename):
init_dict = {}
- if type(filename) is str:
+ if isinstance(filename, str):
with open(filename, 'r') as deploy_settings_file:
init_dict = yaml.safe_load(deploy_settings_file)
else:
@@ -81,6 +84,8 @@ class DeploySettings(dict):
if req_set not in deploy_options:
if req_set == 'dataplane':
self['deploy_options'][req_set] = 'ovs'
+ elif req_set == 'ceph':
+ self['deploy_options'][req_set] = True
else:
self['deploy_options'][req_set] = False
@@ -162,12 +167,7 @@ class DeploySettings(dict):
if 'performance' in self['deploy_options']:
bash_str += self._dump_performance()
bash_str += self._dump_deploy_options_array()
-
- if path:
- with open(path, 'w') as file:
- file.write(bash_str)
- else:
- print(bash_str)
+ utils.write_str(bash_str, path)
class DeploySettingsException(Exception):
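Note: ceph moves into the required settings with a default of True, while ceph_device stays optional. A stand-alone restatement of the new defaulting rule, useful for checking what a sparse deploy-settings file ends up with (the function name is illustrative):

    def fill_required_defaults(deploy_options):
        # Mirrors the defaulting logic shown in the hunk above.
        required = ['sdn_controller', 'odl_version', 'sdn_l3', 'tacker', 'congress',
                    'dataplane', 'sfc', 'vpn', 'vpp', 'ceph']
        for req_set in required:
            if req_set not in deploy_options:
                if req_set == 'dataplane':
                    deploy_options[req_set] = 'ovs'
                elif req_set == 'ceph':
                    deploy_options[req_set] = True   # ceph is now on by default
                else:
                    deploy_options[req_set] = False
        return deploy_options

    print(fill_required_defaults({})['ceph'])   # True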
diff --git a/lib/python/apex/inventory.py b/lib/python/apex/inventory.py
index aa219680..ce16ef41 100644
--- a/lib/python/apex/inventory.py
+++ b/lib/python/apex/inventory.py
@@ -10,21 +10,25 @@
import yaml
import json
+from .common import constants
+from .common import utils
+
class Inventory(dict):
"""
This class parses an APEX inventory yaml file into an object. It
generates or detects all missing fields for deployment.
- It then collapses one level of identifcation from the object to
+ It then collapses one level of identification from the object to
convert it to a structure that can be dumped into a json file formatted
such that Triple-O can read the resulting json as an instackenv.json file.
"""
def __init__(self, source, ha=True, virtual=False):
init_dict = {}
- if type(source) is str:
- with open(source, 'r') as network_settings_file:
- yaml_dict = yaml.safe_load(network_settings_file)
+ self.root_device = constants.DEFAULT_ROOT_DEV
+ if isinstance(source, str):
+ with open(source, 'r') as inventory_file:
+ yaml_dict = yaml.safe_load(inventory_file)
# collapse node identifiers from the structure
init_dict['nodes'] = list(map(lambda n: n[1],
yaml_dict['nodes'].items()))
@@ -40,8 +44,13 @@ class Inventory(dict):
node['pm_user'] = node['ipmi_user']
node['mac'] = [node['mac_address']]
- for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address'):
- del i
+ for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
+ 'disk_device'):
+ if i == 'disk_device' and 'disk_device' in node.keys():
+ self.root_device = node[i]
+ else:
+ continue
+ del node[i]
return node
@@ -53,7 +62,7 @@ class Inventory(dict):
'nodes for HA baremetal deployment')
elif len(self['nodes']) < 2:
raise InventoryException('You must provide at least 2 nodes '
- 'for non-HA baremetal deployment${reset}')
+ 'for non-HA baremetal deployment')
if virtual:
self['arch'] = 'x86_64'
@@ -67,6 +76,16 @@ class Inventory(dict):
def dump_instackenv_json(self):
print(json.dumps(dict(self), sort_keys=True, indent=4))
+ def dump_bash(self, path=None):
+ """
+ Prints settings for bash consumption.
+
+ If optional path is provided, bash string will be written to the file
+ instead of stdout.
+ """
+ bash_str = "{}={}\n".format('root_disk_list', str(self.root_device))
+ utils.write_str(bash_str, path)
+
class InventoryException(Exception):
def __init__(self, value):
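Note: the node loop now also strips an optional disk_device key from each node and remembers it as the root device (falling back to DEFAULT_ROOT_DEV, i.e. 'sda'), and dump_bash exposes it as root_disk_list for the --export-bash path. A minimal usage sketch with a hypothetical inventory path:

    from apex.inventory import Inventory

    inv = Inventory('inventory.yaml', ha=True, virtual=False)   # illustrative path
    print(inv.root_device)   # disk_device from the file if set, otherwise 'sda'
    inv.dump_bash()          # prints "root_disk_list=<device>" for the shell to eval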
diff --git a/lib/python/apex/network_environment.py b/lib/python/apex/network_environment.py
index ae5c602c..4fc6f583 100644
--- a/lib/python/apex/network_environment.py
+++ b/lib/python/apex/network_environment.py
@@ -21,6 +21,7 @@ from .common.constants import (
COMPUTE_PRE,
PRE_CONFIG_DIR
)
+from .network_settings import NetworkSettings
HEAT_NONE = 'OS::Heat::None'
PORTS = '/ports'
@@ -63,15 +64,13 @@ class NetworkEnvironment(dict):
Create Network Environment according to Network Settings
"""
init_dict = {}
- if type(filename) is str:
+ if isinstance(filename, str):
with open(filename, 'r') as net_env_fh:
init_dict = yaml.safe_load(net_env_fh)
super().__init__(init_dict)
- try:
- enabled_nets = net_settings.enabled_network_list
- except:
- raise NetworkEnvException('Invalid Network Setting object')
+ if not isinstance(net_settings, NetworkSettings):
+ raise NetworkEnvException('Invalid Network Settings object')
self._set_tht_dir()
@@ -86,19 +85,21 @@ class NetworkEnvironment(dict):
nets[ADMIN_NETWORK]['installer_vm']['ip']
self[param_def]['DnsServers'] = net_settings['dns_servers']
- if EXTERNAL_NETWORK in enabled_nets:
- external_cidr = nets[EXTERNAL_NETWORK][0]['cidr']
+ if EXTERNAL_NETWORK in net_settings.enabled_network_list:
+ external_cidr = net_settings.get_network(EXTERNAL_NETWORK)['cidr']
self[param_def]['ExternalNetCidr'] = str(external_cidr)
- if type(nets[EXTERNAL_NETWORK][0]['installer_vm']['vlan']) is int:
+ external_vlan = self._get_vlan(net_settings.get_network(
+ EXTERNAL_NETWORK))
+ if isinstance(external_vlan, int):
self[param_def]['NeutronExternalNetworkBridge'] = '""'
- self[param_def]['ExternalNetworkVlanID'] = \
- nets[EXTERNAL_NETWORK][0]['installer_vm']['vlan']
- external_range = nets[EXTERNAL_NETWORK][0]['usable_ip_range']
+ self[param_def]['ExternalNetworkVlanID'] = external_vlan
+ external_range = net_settings.get_network(EXTERNAL_NETWORK)[
+ 'overcloud_ip_range']
self[param_def]['ExternalAllocationPools'] = \
[{'start': str(external_range[0]),
'end': str(external_range[1])}]
self[param_def]['ExternalInterfaceDefaultRoute'] = \
- nets[EXTERNAL_NETWORK][0]['gateway']
+ net_settings.get_network(EXTERNAL_NETWORK)['gateway']
if external_cidr.version == 6:
postfix = '/external_v6.yaml'
@@ -110,8 +111,8 @@ class NetworkEnvironment(dict):
# apply resource registry update for EXTERNAL_RESOURCES
self._config_resource_reg(EXTERNAL_RESOURCES, postfix)
- if TENANT_NETWORK in enabled_nets:
- tenant_range = nets[TENANT_NETWORK]['usable_ip_range']
+ if TENANT_NETWORK in net_settings.enabled_network_list:
+ tenant_range = nets[TENANT_NETWORK]['overcloud_ip_range']
self[param_def]['TenantAllocationPools'] = \
[{'start': str(tenant_range[0]),
'end': str(tenant_range[1])}]
@@ -123,7 +124,7 @@ class NetworkEnvironment(dict):
postfix = '/tenant.yaml'
tenant_vlan = self._get_vlan(nets[TENANT_NETWORK])
- if type(tenant_vlan) is int:
+ if isinstance(tenant_vlan, int):
self[param_def]['TenantNetworkVlanID'] = tenant_vlan
else:
postfix = '/noop.yaml'
@@ -131,8 +132,8 @@ class NetworkEnvironment(dict):
# apply resource registry update for TENANT_RESOURCES
self._config_resource_reg(TENANT_RESOURCES, postfix)
- if STORAGE_NETWORK in enabled_nets:
- storage_range = nets[STORAGE_NETWORK]['usable_ip_range']
+ if STORAGE_NETWORK in net_settings.enabled_network_list:
+ storage_range = nets[STORAGE_NETWORK]['overcloud_ip_range']
self[param_def]['StorageAllocationPools'] = \
[{'start': str(storage_range[0]),
'end': str(storage_range[1])}]
@@ -143,7 +144,7 @@ class NetworkEnvironment(dict):
else:
postfix = '/storage.yaml'
storage_vlan = self._get_vlan(nets[STORAGE_NETWORK])
- if type(storage_vlan) is int:
+ if isinstance(storage_vlan, int):
self[param_def]['StorageNetworkVlanID'] = storage_vlan
else:
postfix = '/noop.yaml'
@@ -151,8 +152,8 @@ class NetworkEnvironment(dict):
# apply resource registry update for STORAGE_RESOURCES
self._config_resource_reg(STORAGE_RESOURCES, postfix)
- if API_NETWORK in enabled_nets:
- api_range = nets[API_NETWORK]['usable_ip_range']
+ if API_NETWORK in net_settings.enabled_network_list:
+ api_range = nets[API_NETWORK]['overcloud_ip_range']
self[param_def]['InternalApiAllocationPools'] = \
[{'start': str(api_range[0]),
'end': str(api_range[1])}]
@@ -163,7 +164,7 @@ class NetworkEnvironment(dict):
else:
postfix = '/internal_api.yaml'
api_vlan = self._get_vlan(nets[API_NETWORK])
- if type(api_vlan) is int:
+ if isinstance(api_vlan, int):
self[param_def]['InternalApiNetworkVlanID'] = api_vlan
else:
postfix = '/noop.yaml'
@@ -184,9 +185,9 @@ class NetworkEnvironment(dict):
self[param_def][flag] = True
def _get_vlan(self, network):
- if type(network['nic_mapping'][CONTROLLER]['vlan']) is int:
+ if isinstance(network['nic_mapping'][CONTROLLER]['vlan'], int):
return network['nic_mapping'][CONTROLLER]['vlan']
- elif type(network['nic_mapping'][COMPUTE]['vlan']) is int:
+ elif isinstance(network['nic_mapping'][COMPUTE]['vlan'], int):
return network['nic_mapping'][COMPUTE]['vlan']
else:
return 'native'
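Note: the constructor now insists on a real NetworkSettings object instead of probing for enabled_network_list, and per-network lookups go through net_settings.get_network(). A minimal sketch of the expected call pattern (file names are illustrative, and the argument order is assumed from how apex_python_utils.py builds these objects):

    from apex.network_settings import NetworkSettings
    from apex.network_environment import NetworkEnvironment

    ns = NetworkSettings('network_settings.yaml')             # illustrative path
    ne = NetworkEnvironment(ns, 'network-environment.yaml')   # anything other than a
                                                              # NetworkSettings raises
                                                              # NetworkEnvException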
diff --git a/lib/python/apex/network_settings.py b/lib/python/apex/network_settings.py
index c9f7d450..b04f141a 100644
--- a/lib/python/apex/network_settings.py
+++ b/lib/python/apex/network_settings.py
@@ -12,9 +12,8 @@ import logging
import ipaddress
from copy import copy
-
-from . import ip_utils
from .common import utils
+from . import ip_utils
from .common.constants import (
CONTROLLER,
COMPUTE,
@@ -42,7 +41,7 @@ class NetworkSettings(dict):
"""
def __init__(self, filename):
init_dict = {}
- if type(filename) is str:
+ if isinstance(filename, str):
with open(filename, 'r') as network_settings_file:
init_dict = yaml.safe_load(network_settings_file)
else:
@@ -55,7 +54,7 @@ class NetworkSettings(dict):
def merge(pri, sec):
for key, val in sec.items():
if key in pri:
- if type(val) is dict:
+ if isinstance(val, dict):
merge(pri[key], val)
# else
# do not overwrite what's already there
@@ -71,7 +70,14 @@ class NetworkSettings(dict):
def get_network(self, network):
if network == EXTERNAL_NETWORK and self['networks'][network]:
- return self['networks'][network][0]
+ for net in self['networks'][network]:
+ if 'public' in net:
+ return net
+
+ raise NetworkSettingsException("The external network, "
+ "'public', should be defined "
+ "when external networks are "
+ "enabled")
else:
return self['networks'][network]
@@ -92,14 +98,11 @@ class NetworkSettings(dict):
if _network.get('enabled', True):
logging.info("{} enabled".format(network))
self._config_required_settings(network)
- if network == EXTERNAL_NETWORK:
- nicmap = _network['nic_mapping']
- else:
- nicmap = _network['nic_mapping']
+ nicmap = _network['nic_mapping']
iface = nicmap[CONTROLLER]['members'][0]
self._config_ip_range(network=network,
interface=iface,
- ip_range='usable_ip_range',
+ ip_range='overcloud_ip_range',
start_offset=21, end_offset=21)
self.enabled_network_list.append(network)
self._validate_overcloud_nic_order(network)
@@ -137,7 +140,7 @@ class NetworkSettings(dict):
if interfaces:
interface = interfaces[0]
- if type(_role.get('vlan', 'native')) is not int and \
+ if not isinstance(_role.get('vlan', 'native'), int) and \
any(y == interface for x, y in self.nics[role].items()):
raise NetworkSettingsException(
"Duplicate {} already specified for "
@@ -183,7 +186,6 @@ class NetworkSettings(dict):
ip = ipaddress.ip_address(_network['installer_vm']['ip'])
nic_if = ip_utils.get_interface(ucloud_if_list[0], ip.version)
if nic_if:
- ucloud_if_list = [nic_if]
logging.info("{}_bridged_interface: {}".
format(network, nic_if))
else:
@@ -312,16 +314,16 @@ class NetworkSettings(dict):
flatten lists to delim separated strings
flatten dics to underscored key names and string values
"""
- if type(obj) is list:
+ if isinstance(obj, list):
return "{}=\'{}\'\n".format(name,
delim.join(map(lambda x: str(x),
obj)))
- elif type(obj) is dict:
+ elif isinstance(obj, dict):
flat_str = ''
for k in obj:
flat_str += flatten("{}_{}".format(name, k), obj[k])
return flat_str
- elif type(obj) is str:
+ elif isinstance(obj, str):
return "{}='{}'\n".format(name, obj)
else:
return "{}={}\n".format(name, str(obj))
@@ -336,11 +338,7 @@ class NetworkSettings(dict):
bash_str += flatten('dns_servers', self['dns_servers'], ' ')
bash_str += flatten('domain_name', self['dns-domain'], ' ')
bash_str += flatten('ntp_server', self['ntp_servers'][0], ' ')
- if path:
- with open(path, 'w') as file:
- file.write(bash_str)
- else:
- print(bash_str)
+ utils.write_str(bash_str, path)
def get_ip_addr_family(self,):
"""
diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py
index b0ebb270..e21d0464 100755
--- a/lib/python/apex_python_utils.py
+++ b/lib/python/apex_python_utils.py
@@ -22,7 +22,6 @@ from apex import NetworkEnvironment
from apex import DeploySettings
from apex import Inventory
from apex import ip_utils
-from apex.common.constants import ADMIN_NETWORK
def parse_net_settings(args):
@@ -66,7 +65,10 @@ def run_clean(args):
def parse_inventory(args):
inventory = Inventory(args.file, ha=args.ha, virtual=args.virtual)
- inventory.dump_instackenv_json()
+ if args.export_bash is True:
+ inventory.dump_bash()
+ else:
+ inventory.dump_instackenv_json()
def find_ip(args):
@@ -200,6 +202,11 @@ def get_parser():
default=False,
action='store_true',
help='Indicate if deployment inventory is virtual')
+ inventory.add_argument('--export-bash',
+ default=False,
+ dest='export_bash',
+ action='store_true',
+ help='Export bash variables from inventory')
inventory.set_defaults(func=parse_inventory)
clean = subparsers.add_parser('clean',
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
index 6ba9a545..080fcbbd 100755
--- a/lib/undercloud-functions.sh
+++ b/lib/undercloud-functions.sh
@@ -19,7 +19,7 @@ function setup_undercloud_vm {
define_vm undercloud hd 30 "$undercloud_nets" 4 12288
### this doesn't work for some reason I was getting hangup events so using cp instead
- #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
+ #virsh vol-upload --pool default --vol undercloud.qcow2 --file $BASE/stack/undercloud.qcow2
#2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
#2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
#2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
@@ -28,14 +28,14 @@ function setup_undercloud_vm {
#error: Reconnected to the hypervisor
local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
- cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst
+ cp -f $IMAGES/undercloud.qcow2 $undercloud_dst
# resize Undercloud machine
echo "Checking if Undercloud needs to be resized..."
undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$undercloud_size" -lt 30 ]; then
qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
- LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
+ LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $IMAGES/undercloud.qcow2 $undercloud_dst
LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$new_size" -lt 30 ]; then
@@ -71,12 +71,12 @@ function setup_undercloud_vm {
CNT=10
echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
- while ! $(arp -e | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
+ while ! $(arp -en | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
echo -n "."
sleep 10
CNT=$((CNT-1))
done
- UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})
+ UNDERCLOUD=$(arp -en | grep ${undercloud_mac} | awk {'print $1'})
if [ -z "$UNDERCLOUD" ]; then
echo "\n\nCan't get IP for Undercloud. Can Not Continue."
@@ -136,12 +136,12 @@ function configure_undercloud {
ovs_dpdk_bridge=''
fi
- if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e "br-ex"); then
+ if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e "br-ex"); then
echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
exit 1
fi
- if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
+ if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
exit 1
fi
@@ -207,6 +207,10 @@ openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.$
sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
+if [[ -n "${deploy_options_array['ceph_device']}" ]]; then
+ sed -i '/ExtraConfig/a\\ ceph::profile::params::osds: {\\x27${deploy_options_array['ceph_device']}\\x27: {}}' ${ENV_FILE}
+fi
+
sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
@@ -249,6 +253,7 @@ sudo systemctl restart openstack-heat-api
EOI
# configure external network
+if [[ "$enabled_network_list" =~ "external" ]]; then
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" << EOI
if [[ "$external_installer_vm_vlan" != "native" ]]; then
cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${external_installer_vm_vlan}
@@ -270,6 +275,7 @@ else
fi
fi
EOI
+fi
# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
# TODO: revisit and file a bug if necessary. This should eventually be removed
diff --git a/lib/utility-functions.sh b/lib/utility-functions.sh
index bf4128a0..c12619ae 100644
--- a/lib/utility-functions.sh
+++ b/lib/utility-functions.sh
@@ -24,7 +24,7 @@ function undercloud_connect {
##outputs the Undercloud's IP address
##params: none
function get_undercloud_ip {
- echo $(arp -a | grep $(virsh domiflist undercloud | grep default |\
+ echo $(arp -an | grep $(virsh domiflist undercloud | grep default |\
awk '{print $5}') | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
}
@@ -80,26 +80,6 @@ function opendaylight_connect {
##outputs heat stack deployment failures
##params: none
function debug_stack {
- local failure_output
- local phys_id
- declare -a resource_arr
- declare -a phys_id_arr
-
source ~/stackrc
-
- IFS=$'\n'
- for resource in $(openstack stack resource list -n 5 overcloud | grep FAILED); do
- unset IFS
- resource_arr=(${resource//|/ })
- phys_id=$(openstack stack resource show ${resource_arr[-1]} ${resource_arr[0]} | grep physical_resource_id 2> /dev/null)
- if [ -n "$phys_id" ]; then
- phys_id_arr=(${phys_id//|/ })
- failure_output+="******************************************************"
- failure_output+="\n${resource}:\n\n$(openstack stack deployment show ${phys_id_arr[-1]} 2> /dev/null)"
- failure_output+="\n******************************************************"
- fi
- unset phys_id
- done
-
- echo -e $failure_output
+ openstack stack failures list overcloud --long
}