summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--build/opnfv-apex-undercloud.spec4
-rwxr-xr-xci/deploy.sh49
-rw-r--r--docs/installation-instructions/baremetal.rst29
3 files changed, 61 insertions, 21 deletions
diff --git a/build/opnfv-apex-undercloud.spec b/build/opnfv-apex-undercloud.spec
index 18092740..8d585db2 100644
--- a/build/opnfv-apex-undercloud.spec
+++ b/build/opnfv-apex-undercloud.spec
@@ -9,8 +9,8 @@ URL: https://gerrit.opnfv.org/gerrit/apex.git
Source0: opnfv-apex-undercloud.tar.gz
BuildArch: noarch
-BuildRequires: openvswitch qemu-kvm python-docutils
-Requires: openvswitch qemu-kvm bridge-utils libguestfs-tools
+BuildRequires: openvswitch libvirt qemu-kvm python-docutils
+Requires: openvswitch libvirt qemu-kvm bridge-utils libguestfs-tools
%description
Scripts and Disk images to launch Instack Undercloud for OPNFV Apex
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 3663ff0a..31b5d2f5 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -303,7 +303,7 @@ parse_inventory_file() {
exit 1
fi
- eval $(parse_yaml $INVENTORY_FILE)
+ eval $(parse_yaml $INVENTORY_FILE) || echo "${red}Failed to parse inventory.yaml. Aborting.${reset}" && exit 1
instack_env_output="
{
@@ -388,7 +388,8 @@ function configure_deps {
fi
# ensure networks are configured
- systemctl start openvswitch
+ systemctl status libvirtd || systemctl start libvirtd
+ systemctl status openvswitch || systemctl start openvswitch
# If flat we only use admin network
if [[ "$net_isolation_enabled" == "FALSE" ]]; then
@@ -400,10 +401,15 @@ function configure_deps {
virsh_enabled_networks=$enabled_network_list
fi
+ virsh net-list | grep default || virsh net-define /usr/share/libvirt/networks/default.xml
+ virsh net-list | grep -E "default\s+active" > /dev/null || virsh net-start default
+ virsh net-list | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default
+
for network in ${OPNFV_NETWORK_TYPES}; do
ovs-vsctl list-br | grep ${NET_MAP[$network]} > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
virsh net-list --all | grep ${NET_MAP[$network]} > /dev/null || virsh net-define $CONFIG/${NET_MAP[$network]}-net.xml
virsh net-list | grep -E "${NET_MAP[$network]}\s+active" > /dev/null || virsh net-start ${NET_MAP[$network]}
+ virsh net-list | grep -E "${NET_MAP[$network]}\s+active\s+yes" > /dev/null || virsh net-autostart --network ${NET_MAP[$network]}
done
echo -e "${blue}INFO: Bridges set: ${reset}"
@@ -491,7 +497,7 @@ function setup_instack_vm {
#error: internal error: received hangup / error event on socket
#error: Reconnected to the hypervisor
- instack_dst=/var/lib/libvirt/images/instack.qcow2
+ local instack_dst=/var/lib/libvirt/images/instack.qcow2
cp -f $RESOURCES/instack.qcow2 $instack_dst
# resize instack machine
@@ -499,8 +505,8 @@ function setup_instack_vm {
instack_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $instack_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$instack_size" -lt 30 ]; then
qemu-img resize /var/lib/libvirt/images/instack.qcow2 +25G
- LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/instack.qcow2 $instack_dst
- LIBGUESTFS_BACKEND=direct virt-customize -a $instack_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
+ LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/instack.qcow2 $instack_dst
+ LIBGUESTFS_BACKEND=direct virt-customize -a $instack_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $instack_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$new_size" -lt 30 ]; then
echo "Error resizing instack machine, disk size is ${new_size}"
@@ -519,7 +525,7 @@ function setup_instack_vm {
# if the VM is not running update the authkeys and start it
if ! virsh list | grep instack > /dev/null; then
echo "Injecting ssh key to instack VM"
- virt-customize -c qemu:///system -d instack --run-command "mkdir -p /root/.ssh/" \
+ LIBGUESTFS_BACKEND=direct virt-customize -a $instack_dst --run-command "mkdir -p /root/.ssh/" \
--upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
--run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
--run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
@@ -798,6 +804,11 @@ function undercloud_prep_overcloud_deploy {
SDN_IMAGE=opendaylight
if [ "${deploy_options_array['sfc']}" == 'true' ]; then
SDN_IMAGE+=-sfc
+ if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
+ echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute an SFC deployment."
+ echo "Please install the opnfv-apex-opendaylight-sfc package to provide this overcloud image for deployment.${reset}"
+ exit 1
+ fi
fi
elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
@@ -817,6 +828,14 @@ function undercloud_prep_overcloud_deploy {
exit 1
fi
+ # Make sure the correct overcloud image is available
+ if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
+ echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
+ echo "Both ONOS and OpenDaylight are currently deployed from this image."
+ echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
+ exit 1
+ fi
+
echo "Copying overcloud image to instack"
scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
@@ -932,10 +951,10 @@ function configure_post_install {
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
source overcloudrc
set -o errexit
-service_tenant_id=$(keystone tenant-get service 2>/dev/null | grep id | cut -d '|' -f 3)
+service_tenant_id="\$(keystone tenant-get service | grep id | awk '{ print \$4 }')"
echo "Configuring Neutron external network"
-neutron net-create external --router:external=True --tenant-id $service_tenant_id
-neutron subnet-create --name external-net --tenant-id $service_tenant_id --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
+neutron net-create external --router:external=True --tenant-id \$service_tenant_id
+neutron subnet-create --name external-net --tenant-id \$service_tenant_id --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
EOI
echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
@@ -999,7 +1018,7 @@ for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
sudo chown heat-admin /home/heat-admin/messages.log
EOF
scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
-if [ "\$debug" == "TRUE" ]; then
+if [ "$debug" == "TRUE" ]; then
nova list --ip \$node
echo "---------------------------"
echo "-----/var/log/messages-----"
@@ -1013,12 +1032,12 @@ fi
sudo rm -f /home/heat-admin/messages.log
EOF
done
-EOI
- # Print out the dashboard URL
- source stackrc
- publicvip=$(heat output-show overcloud PublicVip | sed 's/"//g')
- echo "Overcloud dashboard available at http://$publicvip/dashboard"
+# Print out the dashboard URL
+source stackrc
+publicvip=\$(heat output-show overcloud PublicVip | sed 's/"//g')
+echo "Overcloud dashboard available at http://\$publicvip/dashboard"
+EOI
}
diff --git a/docs/installation-instructions/baremetal.rst b/docs/installation-instructions/baremetal.rst
index c77f2df7..20df1375 100644
--- a/docs/installation-instructions/baremetal.rst
+++ b/docs/installation-instructions/baremetal.rst
@@ -10,7 +10,7 @@ platform. All the networks involved in the OPNFV infrastructure as well as the
networks and the private tenant VLANs needs to be manually configured.
The Jumphost can be installed using the bootable ISO or by other means including the
-(``opnfv-apex``) RPM and virtualization capabilities. The Jumphost should then be
+(``opnfv-apex``) RPMs and virtualization capabilities. The Jumphost should then be
configured with an IP gateway on its admin or public interface and configured with a
working DNS server. The Jumphost should also have routable access to the lights out network.
@@ -68,9 +68,30 @@ Install Bare Metal Jumphost
1a. If your Jumphost does not have CentOS 7 already on it, or you would like to do a fresh
install, then download the Apex bootable ISO from OPNFV artifacts <http://artifacts.opnfv.org/>.
+ There have been isolated reports of problems with the ISO having trouble completing
+ installation successfully. In the unexpected event the ISO does not work, please work
+ around this by downloading the CentOS 7 DVD and performing a "Virtualization Host" install.
+ If you perform a "Minimal Install" or an install type other than "Virtualization Host" simply
+ run ``sudo yum groupinstall "Virtualization Host" && chkconfig libvirtd on`` and reboot
+ the host. Once you have completed the base CentOS install proceed to step 1b.
1b. If your Jump host already has CentOS 7 with libvirt running on it then install the
- opnfv-apex RPM from OPNFV artifacts <http://artifacts.opnfv.org/>.
+ opnfv-apex RPMs from OPNFV artifacts <http://artifacts.opnfv.org/>. The following RPMS
+ are available for installation:
+
+ - opnfv-apex - OpenDaylight L2 / L3 and ONOS support **
+ - opnfv-apex-opendaylight-sfc - OpenDaylight SFC support **
+ - opnfv-apex-undercloud (required)
+ - opnfv-apex-common (required)
+
+ ** One or more of these RPMs is required.
+ If you only want the experimental SFC support then the opnfv-apex RPM is not required.
+ If you only want OpenDaylight or ONOS support then the opnfv-apex-opendaylight-sfc RPM is
+ not required.
+
+ To install these RPMs download them to the local disk on your CentOS 7 install and pass the
+ file names directly to yum:
+ ``sudo yum install opnfv-apex-<version>.rpm opnfv-apex-undercloud-<version>.rpm opnfv-apex-common-<version>.rpm``
2a. Boot the ISO off of a USB or other installation media and walk through installing OPNFV CentOS 7.
The ISO comes prepared to be written directly to a USB drive with dd as such:
@@ -88,7 +109,7 @@ Install Bare Metal Jumphost
opnfv-apex. If you do not have external connectivity to use this repository you need to download
the OpenVSwitch RPM from the RDO Project repositories and install it with the opnfv-apex RPM.
-3. After the operating system and the opnfv-apex RPM are installed, login to your Jumphost as root.
+3. After the operating system and the opnfv-apex RPMs are installed, login to your Jumphost as root.
4. Configure IP addresses on the interfaces that you have selected as your networks.
@@ -149,7 +170,7 @@ You are now ready to deploy OPNFV using Apex!
Follow the steps below to execute:
1. Execute opnfv-deploy
- ``sudo opnfv-deploy [ --flat | -n network_setttings.yaml ] -i instackenv.json -d deploy_settings.yaml``
+ ``sudo opnfv-deploy [ --flat | -n network_settings.yaml ] -i inventory.yaml -d deploy_settings.yaml``
If you need more information about the options that can be passed to opnfv-deploy use ``opnfv-deploy --help``
--flat will collapse all networks onto a single nic, -n network_settings.yaml allows you to customize your
networking topology.