author     Dan Radez <dradez@redhat.com>    2017-03-30 23:03:43 -0400
committer  Dan Radez <dradez@redhat.com>    2017-05-09 15:09:42 -0400
commit     ac3a86983e4c049a3115c7bd77eeacaeb19d0ca3 (patch)
tree       ed0c27aa4853848b0e47e3ce3de25dfb23bfc8df /lib
parent     ef3dc1ce0323fa0881e416cd4b9028fb4250b719 (diff)
Updating Apex to OpenStack Ocata
- power management updated to virtualbmc, pxe_ssh is deprecated
- removing custom tacker build
- removing custom congress build
- disabling yum update in undercloud on the cli instead of in a patch
- Undercloud is direct kernel booted now, there are no kernel and initrd in the disk image from upstream
- remove OpenDaylight previous to Carbon

JIRA: APEX-433
JIRA: APEX-432
JIRA: APEX-431

Change-Id: I6963f16e65eacade5607a3082b58b6150331406c
Signed-off-by: Dan Radez <dradez@redhat.com>
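
For context on the power-management change: with pxe_ssh deprecated, each virtual node now gets its own virtual BMC and Ironic's pxe_ipmitool driver speaks plain IPMI to it. A minimal sketch of that flow, assuming vbmc's default admin/password credentials and the 192.168.122.1 hypervisor address used later in this change (the ipmitool check is purely illustrative):

    vbmc add baremetal0 --port 6230
    vbmc start baremetal0
    vbmc list    # baremetal0 should show as running on port 6230
    ipmitool -I lanplus -H 192.168.122.1 -p 6230 -U admin -P password power status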
Diffstat (limited to 'lib')
-rwxr-xr-x  lib/configure-deps-functions.sh      7
-rwxr-xr-x  lib/configure-vm                   171
-rw-r--r--  lib/installer/domain.xml             2
-rwxr-xr-x  lib/overcloud-deploy-functions.sh   61
-rwxr-xr-x  lib/post-install-functions.sh       51
-rw-r--r--  lib/python/apex/inventory.py         2
-rwxr-xr-x  lib/undercloud-functions.sh         87
-rwxr-xr-x  lib/virtual-setup-functions.sh      52
8 files changed, 281 insertions, 152 deletions
diff --git a/lib/configure-deps-functions.sh b/lib/configure-deps-functions.sh
index 3c82c66a..32900015 100755
--- a/lib/configure-deps-functions.sh
+++ b/lib/configure-deps-functions.sh
@@ -90,9 +90,14 @@ EOF
fi
done
else
+ # verify virtualbmc is installed for a virtual install
+ if ! rpm -q python2-virtualbmc; then
+ echo -e "${red}ERROR: Package python2-virtualbmc is required to do a virtual install.$reset"
+ exit 1
+ fi
for network in ${OPNFV_NETWORK_TYPES}; do
if ! ovs-vsctl --may-exist add-br ${NET_MAP[$network]}; then
- echo -e "${red}ERROR: Failed to create ovs bridge ${NET_MAP[$network]}{$reset}"
+ echo -e "${red}ERROR: Failed to create ovs bridge ${NET_MAP[$network]}${reset}"
exit 1
fi
echo "${blue}INFO: Creating Virsh Network: $network${reset}"
diff --git a/lib/configure-vm b/lib/configure-vm
new file mode 100755
index 00000000..340a7ab6
--- /dev/null
+++ b/lib/configure-vm
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+
+import argparse
+import math
+import os
+import random
+
+import libvirt
+
+templatedir = os.getenv('LIB', '/var/opt/opnfv/lib') + '/installer/'
+
+MAX_NUM_MACS = math.trunc(0xff/2)
+
+
+def generate_baremetal_macs(count=1):
+ """Generate an Ethernet MAC address suitable for baremetal testing."""
+    # NOTE(dprince): We generate our own bare metal MAC addresses here
+ # instead of relying on libvirt so that we can ensure the
+ # locally administered bit is set low. (The libvirt default is
+ # to set the 2nd MSB high.) This effectively allows our
+ # fake baremetal VMs to more accurately behave like real hardware
+ # and fixes issues with bridge/DHCP configurations which rely
+ # on the fact that bridges assume the MAC address of the lowest
+ # attached NIC.
+ # MACs generated for a given machine will also be in sequential
+ # order, which matches how most BM machines are laid out as well.
+ # Additionally we increment each MAC by two places.
+ macs = []
+
+ if count > MAX_NUM_MACS:
+ raise ValueError("The MAX num of MACS supported is %i." % MAX_NUM_MACS)
+
+ base_nums = [0x00,
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ base_mac = ':'.join(map(lambda x: "%02x" % x, base_nums))
+
+ start = random.randint(0x00, 0xff)
+ if (start + (count * 2)) > 0xff:
+ # leave room to generate macs in sequence
+ start = 0xff - count * 2
+ for num in range(0, count*2, 2):
+ mac = start + num
+ macs.append(base_mac + ":" + ("%02x" % mac))
+ return macs
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Configure a kvm virtual machine for the seed image.")
+ parser.add_argument('--name', default='seed',
+ help='the name to give the machine in libvirt.')
+ parser.add_argument('--image',
+ help='Use a custom image file (must be qcow2).')
+ parser.add_argument('--diskbus', default='sata',
+ help='Choose an alternate bus type for the disk')
+ parser.add_argument('--baremetal-interface', nargs='+', default=['brbm'],
+ help='The interface which bare metal nodes will be connected to.')
+ parser.add_argument('--engine', default='kvm',
+ help='The virtualization engine to use')
+ parser.add_argument('--arch', default='i686',
+ help='The architecture to use')
+ parser.add_argument('--memory', default='2097152',
+ help="Maximum memory for the VM in KB.")
+ parser.add_argument('--cpus', default='1',
+ help="CPU count for the VM.")
+ parser.add_argument('--bootdev', default='hd',
+ help="What boot device to use (hd/network).")
+ parser.add_argument('--seed', default=False, action='store_true',
+ help='Create a seed vm with two interfaces.')
+ parser.add_argument('--ovsbridge', default="",
+ help='Place the seed public interface on this ovs bridge.')
+ parser.add_argument('--libvirt-nic-driver', default='virtio',
+ help='The libvirt network driver to use')
+ parser.add_argument('--enable-serial-console', action="store_true",
+ help='Enable a serial console')
+ parser.add_argument('--direct-boot',
+ help='Enable directboot to <value>.{vmlinux & initrd}')
+ parser.add_argument('--kernel-arg', action="append", dest='kernel_args',
+                        help='Kernel arguments, use multiple times for multiple args.')
+ parser.add_argument('--uri', default='qemu:///system',
+ help='The server uri with which to connect.')
+ args = parser.parse_args()
+ with file(templatedir + '/domain.xml', 'rb') as f:
+ source_template = f.read()
+ imagefile = '/var/lib/libvirt/images/seed.qcow2'
+ if args.image:
+ imagefile = args.image
+ imagefile = os.path.realpath(imagefile)
+ params = {
+ 'name': args.name,
+ 'imagefile': imagefile,
+ 'engine': args.engine,
+ 'arch': args.arch,
+ 'memory': args.memory,
+ 'cpus': args.cpus,
+ 'bootdev': args.bootdev,
+ 'network': '',
+ 'enable_serial_console': '',
+ 'direct_boot': '',
+ 'kernel_args': '',
+ }
+ if args.image is not None:
+ params['imagefile'] = args.image
+
+ # Configure the bus type for the target disk device
+ params['diskbus'] = args.diskbus
+ nicparams = {
+ 'nicdriver': args.libvirt_nic_driver,
+ 'ovsbridge': args.ovsbridge,
+ }
+ if args.seed:
+ if args.ovsbridge:
+ params['network'] = """
+ <interface type='bridge'>
+ <source bridge='%(ovsbridge)s'/>
+ <virtualport type='openvswitch'/>
+ <model type='%(nicdriver)s'/>
+ </interface>""" % nicparams
+ else:
+ params['network'] = """
+ <!-- regular natted network, for access to the vm -->
+ <interface type='network'>
+ <source network='default'/>
+ <model type='%(nicdriver)s'/>
+ </interface>""" % nicparams
+
+ macs = generate_baremetal_macs(len(args.baremetal_interface))
+
+ params['bm_network'] = ""
+ for bm_interface, mac in zip(args.baremetal_interface, macs):
+ bm_interface_params = {
+ 'bminterface': bm_interface,
+ 'bmmacaddress': mac,
+ 'nicdriver': args.libvirt_nic_driver,
+ }
+ params['bm_network'] += """
+ <!-- bridged 'bare metal' network on %(bminterface)s -->
+ <interface type='network'>
+ <mac address='%(bmmacaddress)s'/>
+ <source network='%(bminterface)s'/>
+ <model type='%(nicdriver)s'/>
+ </interface>""" % bm_interface_params
+
+ if args.enable_serial_console:
+ params['enable_serial_console'] = """
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ """
+ if args.direct_boot:
+ params['direct_boot'] = """
+ <kernel>/var/lib/libvirt/images/%(direct_boot)s.vmlinuz</kernel>
+ <initrd>/var/lib/libvirt/images/%(direct_boot)s.initrd</initrd>
+ """ % { 'direct_boot': args.direct_boot }
+ if args.kernel_args:
+ params['kernel_args'] = """
+ <cmdline>%s</cmdline>
+ """ % ' '.join(args.kernel_args)
+
+ libvirt_template = source_template % params
+ conn=libvirt.open(args.uri)
+ a = conn.defineXML(libvirt_template)
+ print ("Created machine %s with UUID %s" % (args.name, a.UUIDString()))
+
+if __name__ == '__main__':
+ main()
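
As a usage sketch for the new script: a hypothetical invocation for the direct-kernel-booted undercloud, mirroring the flags virtual-setup-functions.sh passes further down. The memory/cpu values and the "admin" interface name are illustrative, and LIB is assumed to point at a checkout containing installer/domain.xml:

    export LIB=/var/opt/opnfv/lib
    $LIB/configure-vm --name undercloud \
        --bootdev hd \
        --image /var/lib/libvirt/images/undercloud.qcow2 \
        --diskbus sata \
        --arch x86_64 \
        --cpus 4 \
        --memory 8388608 \
        --libvirt-nic-driver virtio \
        --direct-boot overcloud-full \
        --kernel-arg console=ttyS0 --kernel-arg root=/dev/sda \
        --baremetal-interface admin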
diff --git a/lib/installer/domain.xml b/lib/installer/domain.xml
index c710e561..ead0de69 100644
--- a/lib/installer/domain.xml
+++ b/lib/installer/domain.xml
@@ -7,6 +7,8 @@
<type arch='%(arch)s'>hvm</type>
<boot dev='%(bootdev)s'/>
<bootmenu enable='no'/>
+ %(direct_boot)s
+ %(kernel_args)s
</os>
<features>
<acpi/>
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
index e5f9134b..f26a72a3 100755
--- a/lib/overcloud-deploy-functions.sh
+++ b/lib/overcloud-deploy-functions.sh
@@ -42,7 +42,7 @@ function overcloud_deploy {
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-honeycomb-l2.yaml"
fi
else
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-l3.yaml"
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight.yaml"
fi
SDN_IMAGE=opendaylight
elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
@@ -256,22 +256,20 @@ EOI
# Set ODL version accordingly
if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && -n "${deploy_options_array['odl_version']}" ]]; then
case "${deploy_options_array['odl_version']}" in
- beryllium) odl_version=''
+ carbon) odl_version=''
;;
- boron) odl_version='boron'
- ;;
- carbon) odl_version='master'
- ;;
- *) echo -e "${red}Invalid ODL version ${deploy_options_array['odl_version']}. Please use 'carbon' or 'boron' values.${reset}"
+ *) echo -e "${red}Invalid ODL version ${deploy_options_array['odl_version']}. Please use 'carbon'.${reset}"
exit 1
;;
esac
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
- --run-command "yum -y install /root/${odl_version}/*" \
- -a overcloud-full.qcow2
+ if [[ -n "$odl_version" ]]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
+ --run-command "yum -y install /root/${odl_version}/*" \
+ -a overcloud-full.qcow2
EOI
+ fi
fi
# Override ODL if we enable netvirt for fdio
@@ -333,6 +331,9 @@ EOI
DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
if [[ "$virtual" == "TRUE" ]]; then
DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
+ # double check the status of the vbmc devices
+ # TODO add some validation logic here
+ vbmc list
fi
echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
@@ -356,33 +357,25 @@ echo "Uploading overcloud glance images"
openstack overcloud image upload
echo "Configuring undercloud and discovering nodes"
-openstack baremetal import --json instackenv.json
+
if [[ -z "$virtual" ]]; then
- openstack baremetal introspection bulk start
- if [[ -n "$root_disk_list" ]]; then
- openstack baremetal configure boot --root-device=${root_disk_list}
- else
- openstack baremetal configure boot
- fi
+ openstack overcloud node import instackenv.json
+ openstack overcloud node introspect --all-manageable --provide
+ #if [[ -n "$root_disk_list" ]]; then
+ # TODO: replace node configure boot with ironic node-update
+ # TODO: configure boot is not used in ocata here anymore
+ #openstack overcloud node configure boot --root-device=${root_disk_list}
+ #https://github.com/openstack/tripleo-quickstart-extras/blob/master/roles/overcloud-prep-images/templates/overcloud-prep-images.sh.j2#L73-L130
+ #ironic node-update $ironic_node add properties/root_device='{"{{ node['key'] }}": "{{ node['value'] }}"}'
+ #fi
else
- openstack baremetal configure boot
+ openstack overcloud node import --provide instackenv.json
fi
-echo "Configuring flavors"
-for flavor in baremetal control compute; do
- echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
- if openstack flavor list | grep \${flavor}; then
- openstack flavor delete \${flavor}
- fi
- openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 \${flavor}
- if ! openstack flavor list | grep \${flavor}; then
- echo -e "${red}ERROR: Unable to create flavor \${flavor}${reset}"
- fi
-done
-openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
-openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="control" control
-openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="compute" compute
+openstack flavor set --property "cpu_arch"="x86_64" baremetal
+openstack flavor set --property "cpu_arch"="x86_64" control
+openstack flavor set --property "cpu_arch"="x86_64" compute
echo "Configuring nameserver on ctlplane network"
dns_server_ext=''
for dns_server in ${dns_servers}; do
@@ -390,7 +383,7 @@ for dns_server in ${dns_servers}; do
done
neutron subnet-update \$(neutron subnet-list | grep -Ev "id|tenant|external|storage" | grep -v \\\\-\\\\- | awk {'print \$2'}) \${dns_server_ext}
sed -i '/CloudDomain:/c\ CloudDomain: '${domain_name} ${ENV_FILE}
-echo "Executing overcloud deployment, this should run for an extended period without output."
+echo "Executing overcloud deployment, this could run for an extended period without output."
sleep 60 #wait for Hypervisor stats to check-in to nova
# save deploy command so it can be used for debugging
cat > deploy_command << EOF
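
On the TODO above about replacing "configure boot" with a direct Ironic update: following the tripleo-quickstart-extras snippet referenced there, the root-device hint would be set per node roughly like this (node UUID, hint key and value are placeholders, not part of this change):

    ironic node-update <node-uuid> add properties/root_device='{"serial": "<disk-serial>"}'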
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
index a93ad541..7678b0d3 100755
--- a/lib/post-install-functions.sh
+++ b/lib/post-install-functions.sh
@@ -91,36 +91,37 @@ source overcloudrc
set -o errexit
echo "Configuring Neutron external network"
if [[ -n "$external_nic_mapping_compute_vlan" && "$external_nic_mapping_compute_vlan" != 'native' ]]; then
- neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type vlan --provider:segmentation_id ${external_nic_mapping_compute_vlan} --provider:physical_network datacentre
+ openstack network create external --project service --external --provider-network-type vlan --provider-segment $external_nic_mapping_compute_vlan --provider-physical-network datacentre
else
- neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type flat --provider:physical_network datacentre
+ openstack network create external --project service --external --provider-network-type flat --provider-physical-network datacentre
fi
if [ "$external_network_ipv6" == "True" ]; then
- neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') external --ip_version 6 --ipv6_ra_mode slaac --ipv6_address_mode slaac --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
+ openstack subnet create external-subnet --project service --network external --no-dhcp --gateway $external_gateway --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} --subnet-range $external_cidr --ip-version 6 --ipv6-ra-mode slaac --ipv6-address-mode slaac
elif [[ "$enabled_network_list" =~ "external" ]]; then
- neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
+ openstack subnet create external-subnet --project service --network external --no-dhcp --gateway $external_gateway --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} --subnet-range $external_cidr
else
# we re-use the introspection range for floating ips with single admin network
- neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${admin_gateway} --allocation-pool start=${admin_introspection_range%%,*},end=${admin_introspection_range##*,} ${admin_cidr}
+ openstack subnet create external-subnet --project service --network external --no-dhcp --gateway $admin_gateway --allocation-pool start=${admin_introspection_range%%,*},end=${admin_introspection_range##*,} --subnet-range $admin_cidr
fi
if [ "${deploy_options_array['gluon']}" == 'True' ]; then
echo "Creating Gluon dummy network and subnet"
- neutron net-create --shared --provider:network_type vxlan GluonNetwork
- neutron subnet-create --name GluonSubnet --no-gateway --disable-dhcp GluonNetwork 0.0.0.0/1
+ openstack network create gluon-network --share --provider-network-type vxlan
+ openstack subnet create gluon-subnet --no-gateway --no-dhcp --network GluonNetwork --subnet-range 0.0.0.0/1
fi
-echo "Removing sahara endpoint and service"
-sahara_service_id=\$(openstack service list | grep sahara | cut -d ' ' -f 2)
-sahara_endpoint_id=\$(openstack endpoint list | grep sahara | cut -d ' ' -f 2)
-[[ -n "\$sahara_endpoint_id" ]] && openstack endpoint delete \$sahara_endpoint_id
-[[ -n "\$sahara_service_id" ]] && openstack service delete \$sahara_service_id
-
-echo "Removing swift endpoint and service"
-swift_service_id=\$(openstack service list | grep swift | cut -d ' ' -f 2)
-swift_endpoint_id=\$(openstack endpoint list | grep swift | cut -d ' ' -f 2)
-[[ -n "\$swift_endpoint_id" ]] && openstack endpoint delete \$swift_endpoint_id
-[[ -n "\$swift_service_id" ]] && openstack service delete \$swift_service_id
+# Fix project_id and os_tenant_name not in overcloudrc
+# Deprecated openstack client does not need project_id
+# and os_tenant_name anymore but glance client and
+# Rally in general does need it.
+# REMOVE when not needed in Rally/glance-client anymore.
+if ! grep -q "OS_PROJECT_ID" ./overcloudrc;then
+ project_id=\$(openstack project list |grep admin|awk '{print \$2}')
+ echo "export OS_PROJECT_ID=\$project_id" >> ./overcloudrc
+fi
+if ! grep -q "OS_TENANT_NAME" ./overcloudrc;then
+ echo "export OS_TENANT_NAME=admin" >> ./overcloudrc
+fi
if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
for flavor in \$(openstack flavor list -c Name -f value); do
@@ -160,20 +161,6 @@ if [ "${deploy_options_array['congress']}" == 'True' ]; then
fi
-# Fix project_id and os_tenant_name not in overcloudrc
-# Deprecated openstack client does not need project_id
-# and os_tenant_name anymore but glance client and
-# Rally in generall does need it.
-# REMOVE when not needed in Rally/glance-client anymore.
-if ! grep -q "OS_PROJECT_ID" ./overcloudrc;then
- project_id=\$(openstack project list |grep admin|awk '{print \$2}')
- echo "export OS_PROJECT_ID=\$project_id" >> ./overcloudrc
-fi
-if ! grep -q "OS_TENANT_NAME" ./overcloudrc;then
- echo "export OS_TENANT_NAME=admin" >> ./overcloudrc
-fi
-
-
EOI
# we need to restart neutron-server in Gluon deployments to allow the Gluon core
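
The overcloudrc fix-up above just appends two exports of this shape once the stack is up (the project id shown is purely illustrative):

    export OS_PROJECT_ID=3c9fbd7bce1d4f4d9aa1c4f01b0a28d2
    export OS_TENANT_NAME=admin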
diff --git a/lib/python/apex/inventory.py b/lib/python/apex/inventory.py
index ce16ef41..2e08d3b9 100644
--- a/lib/python/apex/inventory.py
+++ b/lib/python/apex/inventory.py
@@ -43,6 +43,8 @@ class Inventory(dict):
node['pm_password'] = node['ipmi_pass']
node['pm_user'] = node['ipmi_user']
node['mac'] = [node['mac_address']]
+ if 'cpus' in node:
+ node['cpu'] = node['cpus']
for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
'disk_device'):
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
index 0b13c8c8..a17036ac 100755
--- a/lib/undercloud-functions.sh
+++ b/lib/undercloud-functions.sh
@@ -11,6 +11,7 @@
##verify vm exists, an has a dhcp lease assigned to it
##params: none
function setup_undercloud_vm {
+ local libvirt_imgs=/var/lib/libvirt/images
if ! virsh list --all | grep undercloud > /dev/null; then
undercloud_nets="default admin"
if [[ $enabled_network_list =~ "external" ]]; then
@@ -27,17 +28,18 @@ function setup_undercloud_vm {
#error: internal error: received hangup / error event on socket
#error: Reconnected to the hypervisor
- local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
- cp -f $IMAGES/undercloud.qcow2 $undercloud_dst
+ cp -f $IMAGES/undercloud.qcow2 $libvirt_imgs/undercloud.qcow2
+ cp -f $IMAGES/overcloud-full.vmlinuz $libvirt_imgs/overcloud-full.vmlinuz
+ cp -f $IMAGES/overcloud-full.initrd $libvirt_imgs/overcloud-full.initrd
# resize Undercloud machine
echo "Checking if Undercloud needs to be resized..."
- undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
+ undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $libvirt_imgs/undercloud.qcow2 |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$undercloud_size" -lt 30 ]; then
qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
- LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $IMAGES/undercloud.qcow2 $undercloud_dst
- LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
- new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
+ LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $IMAGES/undercloud.qcow2 $libvirt_imgs/undercloud.qcow2
+ LIBGUESTFS_BACKEND=direct virt-customize -a $libvirt_imgs/undercloud.qcow2 --run-command 'xfs_growfs -d /dev/sda1 || true'
+ new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $libvirt_imgs/undercloud.qcow2 |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$new_size" -lt 30 ]; then
echo "Error resizing Undercloud machine, disk size is ${new_size}"
exit 1
@@ -56,11 +58,11 @@ function setup_undercloud_vm {
# if the VM is not running update the authkeys and start it
if ! virsh list | grep undercloud > /dev/null; then
if [ "$debug" == 'TRUE' ]; then
- LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --root-password password:opnfvapex
+ LIBGUESTFS_BACKEND=direct virt-customize -a $libvirt_imgs/undercloud.qcow2 --root-password password:opnfvapex
fi
echo "Injecting ssh key to Undercloud VM"
- LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
+ LIBGUESTFS_BACKEND=direct virt-customize -a $libvirt_imgs/undercloud.qcow2 --run-command "mkdir -p /root/.ssh/" \
--upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
--run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
--run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
@@ -115,6 +117,9 @@ function setup_undercloud_vm {
echo -e "${blue}\r ${reset}"
sleep 1
+ # ensure stack user on Undercloud machine has an ssh key
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
+
# ssh key fix for stack user
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
}
@@ -163,47 +168,13 @@ $compute_nic_template
EOF
EOI
- # ensure stack user on Undercloud machine has an ssh key
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
-
- if [ "$virtual" == "TRUE" ]; then
-
- # copy the Undercloud VM's stack user's pub key to
- # root's auth keys so that Undercloud can control
- # vm power on the hypervisor
- ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys
- fi
-
- # allow stack to control power management on the hypervisor via sshkey
- # only if this is a virtual deployment
- if [ "$virtual" == "TRUE" ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-while read -r line; do
- stack_key=\${stack_key}\\\\\\\\n\${line}
-done < <(cat ~/.ssh/id_rsa)
-stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
-sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json
-EOI
- fi
-
- # copy stack's ssh key to this users authorized keys
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys
-
# disable requiretty for sudo
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//'" /etc/sudoers
# configure undercloud on Undercloud VM
- echo "Running undercloud configuration."
- echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
+ echo "Running undercloud installation and configuration."
+ echo "Logging undercloud installation to stack@undercloud:/home/stack/apex-undercloud-install.log"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-sed -i 's/#local_ip/local_ip/' undercloud.conf
-sed -i 's/#network_gateway/network_gateway/' undercloud.conf
-sed -i 's/#network_cidr/network_cidr/' undercloud.conf
-sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
-sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
-sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
-sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
-
openstack-config --set undercloud.conf DEFAULT local_ip ${admin_installer_vm_ip}/${admin_cidr##*/}
openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_installer_vm_ip}
openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_cidr}
@@ -213,6 +184,7 @@ openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_intros
openstack-config --set undercloud.conf DEFAULT undercloud_debug false
openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.${domain_name}"
openstack-config --set undercloud.conf DEFAULT enable_ui false
+openstack-config --set undercloud.conf DEFAULT undercloud_update_packages false
sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
@@ -224,14 +196,12 @@ sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel
sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-# we assume that packages will not need to be updated with undercloud install
-# and that it will be used only to configure the undercloud
-# packages updates would need to be handled manually with yum update
-sudo cp -f /usr/share/diskimage-builder/elements/yum/bin/install-packages /usr/share/diskimage-builder/elements/yum/bin/install-packages.bak
-cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-packages > /dev/null
-#!/bin/sh
-exit 0
-EOF
+#####
+# TEMP WORKAROUND, REMOVE WHEN SNAPS SUPPORTS GLANCE API v2
+# JIRA: SNAPS-66
+#####
+sudo sed -i '/glance::api::enable_v1_api/ s/false/true/' -i /usr/share/openstack-tripleo-heat-templates/puppet/services/glance-api.yaml
+
openstack undercloud install &> apex-undercloud-install.log || {
# cat the undercloud install log incase it fails
@@ -240,8 +210,6 @@ openstack undercloud install &> apex-undercloud-install.log || {
exit 1
}
-sleep 30
-sudo systemctl restart openstack-glance-api
# Set nova domain name
sudo openstack-config --set /etc/nova/nova.conf DEFAULT dns_domain ${domain_name}
sudo openstack-config --set /etc/nova/nova.conf DEFAULT dhcp_domain ${domain_name}
@@ -254,11 +222,6 @@ sudo systemctl restart openstack-nova-scheduler
sudo openstack-config --set /etc/neutron/neutron.conf DEFAULT dns_domain ${domain_name}
sudo systemctl restart neutron-server
sudo systemctl restart neutron-dhcp-agent
-
-sudo sed -i '/num_engine_workers/c\num_engine_workers = 2' /etc/heat/heat.conf
-sudo sed -i '/#workers\s=/c\workers = 2' /etc/heat/heat.conf
-sudo systemctl restart openstack-heat-engine
-sudo systemctl restart openstack-heat-api
EOI
# configure external network
@@ -286,10 +249,4 @@ fi
EOI
fi
-# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
-# TODO: revisit and file a bug if necessary. This should eventually be removed
-# as well as glance api problem
-echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
-sleep 15
-
}
diff --git a/lib/virtual-setup-functions.sh b/lib/virtual-setup-functions.sh
index c74a374b..ac7b507b 100755
--- a/lib/virtual-setup-functions.sh
+++ b/lib/virtual-setup-functions.sh
@@ -74,23 +74,26 @@ EOF
node${i}:
mac_address: "$mac"
ipmi_ip: 192.168.122.1
- ipmi_user: root
- ipmi_pass: "INSERT_STACK_USER_PRIV_KEY"
- pm_type: "pxe_ssh"
- cpus: $vcpus
+ ipmi_user: admin
+ ipmi_pass: "password"
+ pm_type: "pxe_ipmitool"
+ pm_port: "623$i"
+ cpu: $vcpus
memory: $ramsize
disk: 41
arch: "x86_64"
capabilities: "$capability"
EOF
+ vbmc add baremetal$i --port 623$i
+ if service firewalld status > /dev/null; then
+ firewall-cmd --permanent --zone=public --add-port=623$i/udp
+ fi
+ # TODO: add iptables check and commands too
+ vbmc start baremetal$i
done
-
- #Overwrite the tripleo-inclubator domain.xml with our own, keeping a backup.
- if [ ! -f /usr/share/tripleo/templates/domain.xml.bak ]; then
- /usr/bin/mv -f /usr/share/tripleo/templates/domain.xml /usr/share/tripleo/templates/domain.xml.bak
+ if service firewalld status > /dev/null; then
+ firewall-cmd --reload
fi
-
- /usr/bin/cp -f $LIB/installer/domain.xml /usr/share/tripleo/templates/domain.xml
}
##Create virtual nodes in virsh
@@ -101,7 +104,7 @@ EOF
## vcpus - Number of VCPUs to use (defaults to 4)
## ramsize - Size of RAM for VM in MB (defaults to 8192)
function define_vm () {
- local vcpus ramsize
+ local vcpus ramsize volume_path direct_boot kernel_args
if [ -z "$5" ]; then
vcpus=4
@@ -129,14 +132,23 @@ function define_vm () {
exit 1
fi
+  # undercloud needs to be direct booted.
+ # the upstream image no longer includes the kernel and initrd
+ if [ "$1" == 'undercloud' ]; then
+ direct_boot='--direct-boot overcloud-full'
+ kernel_args='--kernel-arg console=ttyS0 --kernel-arg root=/dev/sda'
+ fi
+
# create the VM
- /usr/libexec/openstack-tripleo/configure-vm --name $1 \
- --bootdev $2 \
- --image "$volume_path" \
- --diskbus sata \
- --arch x86_64 \
- --cpus $vcpus \
- --memory $ramsize \
- --libvirt-nic-driver virtio \
- --baremetal-interface $4
+ $LIB/configure-vm --name $1 \
+ --bootdev $2 \
+ --image "$volume_path" \
+ --diskbus sata \
+ --arch $(uname -i) \
+ --cpus $vcpus \
+ --memory $ramsize \
+ --libvirt-nic-driver virtio \
+ $direct_boot \
+ $kernel_args \
+ --baremetal-interface $4
}
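
On the iptables TODO in the vbmc block above: when firewalld is not running, the per-node IPMI ports would need to be opened directly; a sketch reusing the same 623$i port scheme, not part of this change:

    if ! service firewalld status > /dev/null; then
        iptables -I INPUT -p udp --dport 623$i -j ACCEPT
    fi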