-rw-r--r--   build/cache.sh                           |    2
-rw-r--r--   build/nics-template.yaml.jinja2          |    2
-rw-r--r--   build/opnfv-apex-common.spec             |   14
-rw-r--r--   build/opnfv-environment.yaml             |    5
-rwxr-xr-x   build/overcloud-full.sh                  |    3
-rwxr-xr-x   build/overcloud-onos.sh                  |   12
-rwxr-xr-x   build/overcloud-opendaylight-sfc.sh      |   48
-rw-r--r--   build/python-congressclient.diff         |   25
-rwxr-xr-x   build/undercloud.sh                      |    3
-rw-r--r--   build/variables.sh                       |    6
-rw-r--r--   ci/PR_revision.log                       |    2
-rwxr-xr-x   ci/build.sh                              |    4
-rwxr-xr-x   ci/deploy.sh                             | 1000
-rwxr-xr-x   ci/util.sh                               |   34
-rw-r--r--   config/deploy/os-odl_l2-sfc-noha.yaml    |    1
-rw-r--r--   config/deploy/os-onos-sfc-ha.yaml        |   10
-rw-r--r--   lib/common-functions.sh                  |   30
-rwxr-xr-x   lib/configure-deps-functions.sh          |  144
-rwxr-xr-x   lib/overcloud-deploy-functions.sh        |  284
-rwxr-xr-x   lib/post-install-functions.sh            |  162
-rw-r--r--   lib/python/apex/network_environment.py   |   12
-rwxr-xr-x   lib/undercloud-functions.sh              |  277
-rwxr-xr-x   lib/virtual-setup-functions.sh           |  151
-rw-r--r--   tests/test_apex_deploy_env.py            |    3
24 files changed, 1183 insertions(+), 1051 deletions(-)
diff --git a/build/cache.sh b/build/cache.sh
index 123748a2..4d41a7a2 100644
--- a/build/cache.sh
+++ b/build/cache.sh
@@ -60,7 +60,7 @@ function populate_cache {
else
my_md5=$(grep ${filename} $CACHE_DIR/.cache | awk {'print $1'})
if [ "$remote_md5" != "$my_md5" ]; then
- echo "MD5 mismatch: Remote MD5 is ${remote_md5}, Cache file MD5 is ${my_md5}"
+ echo "MD5 mismatch, cache file MD5 is ${my_md5}"
echo "Downloading $filename"
curl_file $1 $filename
else
diff --git a/build/nics-template.yaml.jinja2 b/build/nics-template.yaml.jinja2
index 455ae0f3..91d0c478 100644
--- a/build/nics-template.yaml.jinja2
+++ b/build/nics-template.yaml.jinja2
@@ -94,7 +94,7 @@ resources:
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- {%- if 'public_network' in enabled_networks and vlans['private_network'] is number %}
+ {%- if 'public_network' in enabled_networks and vlans['public_network'] is number %}
-
type: vlan
vlan_id: {get_param: ExternalNetworkVlanID}
diff --git a/build/opnfv-apex-common.spec b/build/opnfv-apex-common.spec
index ce77d6df..5e1a9b36 100644
--- a/build/opnfv-apex-common.spec
+++ b/build/opnfv-apex-common.spec
@@ -45,6 +45,7 @@ install config/deploy/os-odl_l2-fdio-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-ap
install config/deploy/os-odl_l2-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-noha.yaml
install config/deploy/os-odl_l3-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l3-nofeature-ha.yaml
install config/deploy/os-onos-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-onos-nofeature-ha.yaml
+install config/deploy/os-onos-sfc-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-onos-sfc-ha.yaml
install config/deploy/os-ocl-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml
install config/network/network_settings.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings.yaml
install config/network/network_settings_v6.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings_v6.yaml
@@ -52,7 +53,12 @@ install config/network/network_settings_v6.yaml %{buildroot}%{_sysconfdir}/opnfv
mkdir -p %{buildroot}%{_var}/opt/opnfv/lib/python/apex
install lib/common-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
+install lib/configure-deps-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
install lib/parse-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
+install lib/virtual-setup-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
+install lib/undercloud-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
+install lib/overcloud-deploy-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
+install lib/post-install-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
install lib/utility-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
install lib/python/apex_python_utils.py %{buildroot}%{_var}/opt/opnfv/lib/python/
mkdir -p %{buildroot}%{python3_sitelib}/apex/
@@ -85,7 +91,12 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%attr(755,root,root) %{_bindir}/opnfv-clean
%attr(755,root,root) %{_bindir}/opnfv-util
%{_var}/opt/opnfv/lib/common-functions.sh
+%{_var}/opt/opnfv/lib/configure-deps-functions.sh
%{_var}/opt/opnfv/lib/parse-functions.sh
+%{_var}/opt/opnfv/lib/virtual-setup-functions.sh
+%{_var}/opt/opnfv/lib/undercloud-functions.sh
+%{_var}/opt/opnfv/lib/overcloud-deploy-functions.sh
+%{_var}/opt/opnfv/lib/post-install-functions.sh
%{_var}/opt/opnfv/lib/utility-functions.sh
%{_var}/opt/opnfv/lib/python/
%{python3_sitelib}/apex/
@@ -102,6 +113,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl_l3-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/os-onos-nofeature-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-onos-sfc-ha.yaml
%{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/network_settings.yaml
%{_sysconfdir}/opnfv-apex/network_settings_v6.yaml
@@ -116,7 +128,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%changelog
* Tue Jul 5 2016 Dan Radez <dradez@redhat.com> - 3.0-10
-- Adding parse-functions.sh
+- Adding functions.sh files
* Thu Jun 15 2016 Tim Rozet <trozet@redhat.com> - 3.0-9
- Add fdio scenarios.
* Tue Jun 14 2016 Feng Pan <fpan@redhat.com> - 3.0-8
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index 54b1e6f2..7a3ae739 100644
--- a/build/opnfv-environment.yaml
+++ b/build/opnfv-environment.yaml
@@ -8,3 +8,8 @@ parameters:
EnableSahara: false
ExtraConfig:
tripleo::ringbuilder::build_ring: False
+ nova::policy::policies:
+ nova-os_compute_api:servers:show:host_status:
+ key: 'os_compute_api:servers:show:host_status'
+ value: 'rule:admin_or_owner'
+
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index 936decc3..01f40269 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -64,6 +64,7 @@ popd > /dev/null
# enable connection tracking for protocal sctp
# install the congress rpms
# upload and explode the congress puppet module
+# install doctor driver ## Can be removed in Newton
LIBGUESTFS_BACKEND=direct virt-customize \
--upload ../opnfv-puppet-tripleo.tar.gz:/etc/puppet/modules \
--run-command "sed -i 's/^#UseDNS.*$/UseDNS no/' /etc/ssh/sshd_config" \
@@ -81,6 +82,8 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--install "python2-congressclient" \
--upload puppet-congress.tar.gz:/etc/puppet/modules/ \
--run-command "cd /etc/puppet/modules/ && tar xzf puppet-congress.tar.gz" \
+ --run-command "cd /usr/lib/python2.7/site-packages/congress/datasources && curl -O $doctor_driver" \
+ --run-command "sed -i \"s/'--detailed-exitcodes',/'--detailed-exitcodes','-l','syslog','-l','console',/g\" /var/lib/heat-config/hooks/puppet" \
-a overcloud-full_build.qcow2
mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
diff --git a/build/overcloud-onos.sh b/build/overcloud-onos.sh
index d59be0a3..e13923d9 100755
--- a/build/overcloud-onos.sh
+++ b/build/overcloud-onos.sh
@@ -19,12 +19,12 @@ cp -f overcloud-full.qcow2 overcloud-full-onos_build.qcow2
#######################################
# upgrade ovs into ovs 2.5.90 with NSH function
-curl -L -O ${onos_ovs_uri}/package_ovs_rpm.tar.gz
-tar -xzf package_ovs_rpm.tar.gz
-LIBGUESTFS_BACKEND=direct virt-customize --upload openvswitch-kmod-2.5.90-1.el7.centos.x86_64.rpm:/root/ \
- --run-command "yum install -y /root/openvswitch-kmod-2.5.90-1.el7.centos.x86_64.rpm" \
- --upload openvswitch-2.5.90-1.el7.centos.x86_64.rpm:/root/ \
- --run-command "yum upgrade -y /root/openvswitch-2.5.90-1.el7.centos.x86_64.rpm" \
+curl -L -O ${onos_ovs_uri}/package_ovs_rpm_new.tar.gz
+tar -xzf package_ovs_rpm_new.tar.gz
+LIBGUESTFS_BACKEND=direct virt-customize --upload ${ovs_kmod_rpm_name}:/root/ \
+ --run-command "yum install -y /root/${ovs_kmod_rpm_name}" \
+ --upload ${ovs_rpm_name}:/root/ \
+ --run-command "yum upgrade -y /root/${ovs_rpm_name}" \
-a overcloud-full-onos_build.qcow2
diff --git a/build/overcloud-opendaylight-sfc.sh b/build/overcloud-opendaylight-sfc.sh
index 9b38ca29..5032ba06 100755
--- a/build/overcloud-opendaylight-sfc.sh
+++ b/build/overcloud-opendaylight-sfc.sh
@@ -8,43 +8,27 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
set -e
+source ./variables.sh
+pushd images > /dev/null
################################################
##### Adding SFC+OpenDaylight overcloud #####
################################################
-#copy opendaylight overcloud full to isolate odl-sfc
-cp -f images/overcloud-full-opendaylight.qcow2 images/overcloud-full-opendaylight-sfc_build.qcow2
+#copy opendaylight overcloud full to odl-sfc
+cp -f overcloud-full-opendaylight.qcow2 overcloud-full-opendaylight-sfc_build.qcow2
-# work around for XFS grow bug
-# http://xfs.org/index.php/XFS_FAQ#Q:_Why_do_I_receive_No_space_left_on_device_after_xfs_growfs.3F
-cat > /tmp/xfs-grow-remount-fix.service << EOF
-[Unit]
-Description=XFS Grow Bug Remount
-After=network.target
-Before=getty@tty1.service
+# upgrade ovs into ovs 2.5.90 with NSH function
+if ! [[ -f "$ovs_rpm_name" && -f "$ovs_kmod_rpm_name" ]]; then
+ curl -L -O ${onos_ovs_uri}/package_ovs_rpm_new.tar.gz
+ tar -xzf package_ovs_rpm_new.tar.gz
+fi
-[Service]
-Type=oneshot
-ExecStart=/bin/bash -c "echo 'XFS Grow Bug Remount Sleeping 180s' && sleep 180 && echo 'XFS Grow Bug Remounting Now' && mount -o remount,inode64 /"
-RemainAfterExit=no
+LIBGUESTFS_BACKEND=direct virt-customize --upload ${ovs_kmod_rpm_name}:/root/ \
+ --run-command "yum install -y /root/${ovs_kmod_rpm_name}" \
+ --upload ${ovs_rpm_name}:/root/ \
+ --run-command "yum upgrade -y /root/${ovs_rpm_name}" \
+ -a overcloud-full-opendaylight-sfc_build.qcow2
-[Install]
-WantedBy=multi-user.target
-EOF
-
-
-# kernel is patched with patch from this post
-# http://xfs.org/index.php/XFS_FAQ#Q:_Why_do_I_receive_No_space_left_on_device_after_xfs_growfs.3F
-LIBGUESTFS_BACKEND=direct virt-customize \
- --upload "/tmp/xfs-grow-remount-fix.service:/etc/systemd/system/xfs-grow-remount-fix.service" \
- --run-command "chmod 664 /etc/systemd/system/xfs-grow-remount-fix.service" \
- --run-command "systemctl enable xfs-grow-remount-fix.service" \
- --install 'https://radez.fedorapeople.org/kernel-ml-3.13.7-1.el7.centos.x86_64.rpm' \
- --run-command 'grub2-set-default "\$(grep -P \"submenu|^menuentry\" /boot/grub2/grub.cfg | cut -d \"\\x27\" | head -n 1)"' \
- --install 'https://radez.fedorapeople.org/openvswitch-kmod-2.3.90-1.el7.centos.x86_64.rpm' \
- --run-command 'yum downgrade -y https://radez.fedorapeople.org/openvswitch-2.3.90-1.x86_64.rpm' \
- --run-command 'rm -f /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \
- --run-command 'ln -s /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/extra/openvswitch/openvswitch.ko /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \
- -a images/overcloud-full-opendaylight-sfc_build.qcow2
-mv images/overcloud-full-opendaylight-sfc_build.qcow2 images/overcloud-full-opendaylight-sfc.qcow2
+mv overcloud-full-opendaylight-sfc_build.qcow2 overcloud-full-opendaylight-sfc.qcow2
+popd > /dev/null
diff --git a/build/python-congressclient.diff b/build/python-congressclient.diff
new file mode 100644
index 00000000..82e76e07
--- /dev/null
+++ b/build/python-congressclient.diff
@@ -0,0 +1,25 @@
+From 26d39efbb931e04a5e95d504c27ede12d0a81c43 Mon Sep 17 00:00:00 2001
+From: Masahito Muroi <muroi.masahito@lab.ntt.co.jp>
+Date: Fri, 25 Mar 2016 14:06:00 +0900
+Subject: [PATCH] Allows DataSource's config field to have not dict type obj
+
+CongressClient expects all datasource driver has dict object in
+config field. It raises an error when a datasource doesn't have
+any config.
+
+This patch allows config fields to be None object.
+
+Change-Id: I73354f1073f3f814854652eaeaa4b3bbe4bfcf7d
+---
+
+diff --git a/congressclient/common/utils.py b/congressclient/common/utils.py
+index 9a381e8..b5cedd4 100644
+--- a/congressclient/common/utils.py
++++ b/congressclient/common/utils.py
+@@ -77,6 +77,8 @@
+ :param data: a dict
+ :rtype: a string formatted to {a:b, c:d}
+ """
++ if not isinstance(data, dict):
++ return str(data)
+ return str({str(key): str(value) for key, value in data.items()})
diff --git a/build/undercloud.sh b/build/undercloud.sh
index 9873c177..7b40625e 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -27,6 +27,7 @@ pushd images > /dev/null
# enabling ceph OSDs to live on the controller
# OpenWSMan package update supports the AMT Ironic driver for the TealBox
# seeding configuration files specific to OPNFV
+# add congress client and apply: https://review.openstack.org/#/c/297515/
# add congress password to python-triploclient
LIBGUESTFS_BACKEND=direct virt-customize \
--upload ../opnfv-tht.tar.gz:/usr/share \
@@ -39,6 +40,8 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--upload ../opnfv-environment.yaml:/home/stack/ \
--upload ../virtual-environment.yaml:/home/stack/ \
--install "python2-congressclient" \
+ --upload ../python-congressclient.diff:/tmp \
+ --run-command "cd /usr/lib/python2.7/site-packages && patch -p1 < /tmp/python-congressclient.diff" \
--run-command "sed -i '/SERVICE_LIST/a\\ \x27congress\x27: {\x27password_field\x27: \x27OVERCLOUD_CONGRESS_PASSWORD\x27},' /usr/lib/python2.7/site-packages/tripleoclient/constants.py" \
--run-command "sed -i '/PASSWORD_NAMES =/a\\ \"OVERCLOUD_CONGRESS_PASSWORD\",' /usr/lib/python2.7/site-packages/tripleoclient/utils.py" \
--run-command "sed -i '/AodhPassword/a\\ parameters\[\x27CongressPassword\x27\] = passwords\[\x27OVERCLOUD_CONGRESS_PASSWORD\x27\]' /usr/lib/python2.7/site-packages/tripleoclient/v1/overcloud_deploy.py" \
diff --git a/build/variables.sh b/build/variables.sh
index ccd1add5..6632b526 100644
--- a/build/variables.sh
+++ b/build/variables.sh
@@ -12,8 +12,9 @@ rdo_images_uri=https://ci.centos.org/artifacts/rdo/images/mitaka/delorean/stable
onos_release_uri=https://downloads.onosproject.org/nightly/
onos_release_file=onos-1.6.0-rc2.tar.gz
onos_jdk_uri=https://www.dropbox.com/s/qyujpib8zyhzeev
-onos_ovs_uri=https://www.dropbox.com/s/gm6o6k80l56pf0o
+onos_ovs_uri=https://www.dropbox.com/s/7rfr9l2qz3a36cc
openstack_congress=https://radez.fedorapeople.org/openstack-congress-2016.1-1.fc24.noarch.rpm
+doctor_driver=https://raw.githubusercontent.com/muroi/congress/doctor-poc/congress/datasources/doctor_driver.py
dpdk_uri_base=http://artifacts.opnfv.org/ovsnfv
dpdk_rpms=(
@@ -23,3 +24,6 @@ dpdk_rpms=(
'ovs4opnfv-32930523-dpdk-tools-16.04.0-1.el7.centos.x86_64.rpm'
'ovs4opnfv-32930523-openvswitch-2.5.90-0.12032.gitc61e93d6.1.el7.centos.x86_64.rpm'
)
+
+ovs_rpm_name=openvswitch-2.5.90-0.11974.gitc4623bb8.1.el7.centos.x86_64.rpm
+ovs_kmod_rpm_name=openvswitch-kmod-2.5.90-1.el7.centos.x86_64.rpm
diff --git a/ci/PR_revision.log b/ci/PR_revision.log
index a63d491d..de62cb6b 100644
--- a/ci/PR_revision.log
+++ b/ci/PR_revision.log
@@ -10,3 +10,5 @@
26,Fixes ODL ML2 IP
30,Adds OVS DPDK config
33,Removes QOS service plugin from Neutron
+35,Use nic1 as control plane inteface name instead of default br-ex
+38,Fixes nova default floating pool to be 'external'
diff --git a/ci/build.sh b/ci/build.sh
index fd079c91..af065b11 100755
--- a/ci/build.sh
+++ b/ci/build.sh
@@ -112,10 +112,10 @@ fi
# Conditionally execute RPM build checks if the specs change and target is not rpm or iso
if [[ "$MAKE_TARGETS" == "images" ]]; then
- commit_file_list=$(git show --pretty="format:" --name-only)
+ commit_file_list=$(git show --pretty="format:" --name-status)
if git show -s | grep "force-build-rpms"; then
MAKE_TARGETS+=" rpms"
- elif [[ $commit_file_list == *build/Makefile* ]]; then
+ elif [[ $commit_file_list == *"A$(printf '\t')"* || $commit_file_list == *build/Makefile* ]]; then
# Makefile forces all rpms to be checked
MAKE_TARGETS+=" rpms-check"
else
diff --git a/ci/deploy.sh b/ci/deploy.sh
index bb4e12ed..57c63766 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -58,7 +58,12 @@ ip_address_family=4
# Libraries
lib_files=(
$LIB/common-functions.sh
+$LIB/configure-deps-functions.sh
$LIB/parse-functions.sh
+$LIB/virtual-setup-functions.sh
+$LIB/undercloud-functions.sh
+$LIB/overcloud-deploy-functions.sh
+$LIB/post-install-functions.sh
$LIB/utility-functions.sh
$LIB/installer/onos/onos_gw_mac_update.sh
)
@@ -69,997 +74,6 @@ for lib_file in ${lib_files[@]}; do
fi
done
-##FUNCTIONS
-##checks if prefix exists in string
-##params: string, prefix
-##usage: contains_prefix "deploy_setting_launcher=1" "deploy_setting"
-contains_prefix() {
- local mystr=$1
- local prefix=$2
- if echo $mystr | grep -E "^$prefix.*$" > /dev/null; then
- return 0
- else
- return 1
- fi
-}
-
-##verify internet connectivity
-#params: none
-function verify_internet {
- if ping -c 2 $ping_site > /dev/null; then
- if ping -c 2 www.google.com > /dev/null; then
- echo "${blue}Internet connectivity detected${reset}"
- return 0
- else
- echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
- return 1
- fi
- else
- echo "${red}No internet connectivity detected${reset}"
- return 1
- fi
-}
-
-##download dependencies if missing and configure host
-#params: none
-function configure_deps {
- if ! verify_internet; then
- echo "${red}Will not download dependencies${reset}"
- internet=false
- fi
-
- # verify ip forwarding
- if sysctl net.ipv4.ip_forward | grep 0; then
- sudo sysctl -w net.ipv4.ip_forward=1
- sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"
- fi
-
- # ensure no dhcp server is running on jumphost
- if ! sudo systemctl status dhcpd | grep dead; then
- echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
- sudo systemctl stop dhcpd
- sudo systemctl disable dhcpd
- fi
-
- # ensure networks are configured
- systemctl status libvirtd || systemctl start libvirtd
- systemctl status openvswitch || systemctl start openvswitch
-
- # If flat we only use admin network
- if [[ "$net_isolation_enabled" == "FALSE" ]]; then
- virsh_enabled_networks="admin_network"
- enabled_network_list="admin_network"
- # For baremetal we only need to create/attach Undercloud to admin and public
- elif [ "$virtual" == "FALSE" ]; then
- virsh_enabled_networks="admin_network public_network"
- else
- virsh_enabled_networks=$enabled_network_list
- fi
-
- # ensure default network is configured correctly
- libvirt_dir="/usr/share/libvirt/networks"
- virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml
- virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default
- virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default
-
- if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
- for network in ${enabled_network_list}; do
- echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
- ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
- virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
-<network>
- <name>$network</name>
- <forward mode='bridge'/>
- <bridge name='${NET_MAP[$network]}'/>
- <virtualport type='openvswitch'/>
-</network>
-EOF
- if ! (virsh net-list --all | grep $network > /dev/null); then
- echo "${red}ERROR: unable to create network: ${network}${reset}"
- exit 1;
- fi
- rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
- virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
- virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
- done
-
- echo -e "${blue}INFO: Bridges set: ${reset}"
- ovs-vsctl list-br
-
- # bridge interfaces to correct OVS instances for baremetal deployment
- for network in ${enabled_network_list}; do
- if [[ "$network" != "admin_network" && "$network" != "public_network" ]]; then
- continue
- fi
- this_interface=$(eval echo \${${network}_bridged_interface})
- # check if this a bridged interface for this network
- if [[ ! -z "$this_interface" || "$this_interface" != "none" ]]; then
- if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
- echo -e "${red}ERROR: Unable to bridge interface ${this_interface} to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
- exit 1
- else
- echo -e "${blue}INFO: Interface ${this_interface} bridged to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
- fi
- else
- echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"
- exit 1
- fi
- done
- else
- for network in ${OPNFV_NETWORK_TYPES}; do
- echo "${blue}INFO: Creating Virsh Network: $network${reset}"
- virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
-<network ipv6='yes'>
-<name>$network</name>
-<bridge name='${NET_MAP[$network]}'/>
-</network>
-EOF
- if ! (virsh net-list --all | grep $network > /dev/null); then
- echo "${red}ERROR: unable to create network: ${network}${reset}"
- exit 1;
- fi
- rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
- virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
- virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
- done
-
- echo -e "${blue}INFO: Bridges set: ${reset}"
- brctl show
- fi
-
- echo -e "${blue}INFO: virsh networks set: ${reset}"
- virsh net-list
-
- # ensure storage pool exists and is started
- virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images
- virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default)
-
- if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
- echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \
-Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
- fi
-
- if ! lsmod | grep kvm > /dev/null; then modprobe kvm; fi
- if ! lsmod | grep kvm_intel > /dev/null; then modprobe kvm_intel; fi
-
- if ! lsmod | grep kvm > /dev/null; then
- echo "${red}kvm kernel modules not loaded!${reset}"
- return 1
- fi
-
- ##sshkeygen for root
- if [ ! -e ~/.ssh/id_rsa.pub ]; then
- ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
- fi
-
- echo "${blue}All dependencies installed and running${reset}"
-}
-
-##verify vm exists, an has a dhcp lease assigned to it
-##params: none
-function setup_undercloud_vm {
- if ! virsh list --all | grep undercloud > /dev/null; then
- undercloud_nets="default admin_network"
- if [[ $enabled_network_list =~ "public_network" ]]; then
- undercloud_nets+=" public_network"
- fi
- define_vm undercloud hd 30 "$undercloud_nets" 4 12288
-
- ### this doesn't work for some reason I was getting hangup events so using cp instead
- #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
- #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
- #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
- #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
- #error: cannot close volume undercloud.qcow2
- #error: internal error: received hangup / error event on socket
- #error: Reconnected to the hypervisor
-
- local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
- cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst
-
- # resize Undercloud machine
- echo "Checking if Undercloud needs to be resized..."
- undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
- if [ "$undercloud_size" -lt 30 ]; then
- qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
- LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
- LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
- new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
- if [ "$new_size" -lt 30 ]; then
- echo "Error resizing Undercloud machine, disk size is ${new_size}"
- exit 1
- else
- echo "Undercloud successfully resized"
- fi
- else
- echo "Skipped Undercloud resize, upstream is large enough"
- fi
-
- else
- echo "Found Undercloud VM, using existing VM"
- fi
-
- # if the VM is not running update the authkeys and start it
- if ! virsh list | grep undercloud > /dev/null; then
- echo "Injecting ssh key to Undercloud VM"
- LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
- --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
- --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
- --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
- --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
- virsh start undercloud
- fi
-
- sleep 10 # let undercloud get started up
-
- # get the undercloud VM IP
- CNT=10
- echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
- undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
- while ! $(arp -e | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
- echo -n "."
- sleep 10
- CNT=$((CNT-1))
- done
- UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})
-
- if [ -z "$UNDERCLOUD" ]; then
- echo "\n\nCan't get IP for Undercloud. Can Not Continue."
- exit 1
- else
- echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"
- fi
-
- CNT=10
- echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
- while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
- echo -n "."
- sleep 3
- CNT=$((CNT-1))
- done
- if [ "$CNT" -eq 0 ]; then
- echo "Failed to contact Undercloud. Can Not Continue"
- exit 1
- fi
- CNT=10
- while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
- echo -n "."
- sleep 3
- CNT=$((CNT-1))
- done
- if [ "$CNT" -eq 0 ]; then
- echo "Failed to connect to Undercloud. Can Not Continue"
- exit 1
- fi
-
- # extra space to overwrite the previous connectivity output
- echo -e "${blue}\r ${reset}"
- sleep 1
-
- # ssh key fix for stack user
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
-}
-
-##Create virtual nodes in virsh
-##params: vcpus, ramsize
-function setup_virtual_baremetal {
- local vcpus ramsize
- if [ -z "$1" ]; then
- vcpus=4
- ramsize=8192
- elif [ -z "$2" ]; then
- vcpus=$1
- ramsize=8192
- else
- vcpus=$1
- ramsize=$(($2*1024))
- fi
- #start by generating the opening json for instackenv.json
- cat > $CONFIG/instackenv-virt.json << EOF
-{
- "nodes": [
-EOF
-
- # next create the virtual machines and add their definitions to the file
- if [ "$ha_enabled" == "False" ]; then
- # 1 controller + computes
- # zero based so just pass compute count
- vm_index=$VM_COMPUTES
- else
- # 3 controller + computes
- # zero based so add 2 to compute count
- vm_index=$((2+$VM_COMPUTES))
- fi
-
- for i in $(seq 0 $vm_index); do
- if ! virsh list --all | grep baremetal${i} > /dev/null; then
- define_vm baremetal${i} network 41 'admin_network' $vcpus $ramsize
- for n in private_network public_network storage_network api_network; do
- if [[ $enabled_network_list =~ $n ]]; then
- echo -n "$n "
- virsh attach-interface --domain baremetal${i} --type network --source $n --model virtio --config
- fi
- done
- else
- echo "Found Baremetal ${i} VM, using existing VM"
- fi
- #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
- mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')
-
- if [ "$VM_COMPUTES" -gt 0 ]; then
- capability="profile:compute"
- VM_COMPUTES=$((VM_COMPUTES - 1))
- else
- capability="profile:control"
- fi
-
- cat >> $CONFIG/instackenv-virt.json << EOF
- {
- "pm_addr": "192.168.122.1",
- "pm_user": "root",
- "pm_password": "INSERT_STACK_USER_PRIV_KEY",
- "pm_type": "pxe_ssh",
- "mac": [
- "$mac"
- ],
- "cpu": "$vcpus",
- "memory": "$ramsize",
- "disk": "41",
- "arch": "x86_64",
- "capabilities": "$capability"
- },
-EOF
- done
-
- #truncate the last line to remove the comma behind the bracket
- tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{}
-
- #finally reclose the bracket and close the instackenv.json file
- cat >> $CONFIG/instackenv-virt.json << EOF
- }
- ],
- "arch": "x86_64",
- "host-ip": "192.168.122.1",
- "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager",
- "seed-ip": "",
- "ssh-key": "INSERT_STACK_USER_PRIV_KEY",
- "ssh-user": "root"
-}
-EOF
- #Overwrite the tripleo-inclubator domain.xml with our own, keeping a backup.
- if [ ! -f /usr/share/tripleo/templates/domain.xml.bak ]; then
- /usr/bin/mv -f /usr/share/tripleo/templates/domain.xml /usr/share/tripleo/templates/domain.xml.bak
- fi
-
- /usr/bin/cp -f $LIB/installer/domain.xml /usr/share/tripleo/templates/domain.xml
-}
-
-##Create virtual nodes in virsh
-##params: name - String: libvirt name for VM
-## bootdev - String: boot device for the VM
-## disksize - Number: size of the disk in GB
-## ovs_bridges: - List: list of ovs bridges
-## vcpus - Number of VCPUs to use (defaults to 4)
-## ramsize - Size of RAM for VM in MB (defaults to 8192)
-function define_vm () {
- local vcpus ramsize
-
- if [ -z "$5" ]; then
- vcpus=4
- ramsize=8388608
- elif [ -z "$6" ]; then
- vcpus=$5
- ramsize=8388608
- else
- vcpus=$5
- ramsize=$(($6*1024))
- fi
-
- # Create the libvirt storage volume
- if virsh vol-list default | grep ${1}.qcow2 2>&1> /dev/null; then
- volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2")
- echo "Volume ${1} exists. Deleting Existing Volume $volume_path"
- virsh vol-dumpxml ${1}.qcow2 --pool default > /dev/null || echo '' #ok for this to fail
- touch $volume_path
- virsh vol-delete ${1}.qcow2 --pool default
- fi
- virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2
- volume_path=$(virsh vol-path --pool default ${1}.qcow2)
- if [ ! -f $volume_path ]; then
- echo "$volume_path Not created successfully... Aborting"
- exit 1
- fi
-
- # create the VM
- /usr/libexec/openstack-tripleo/configure-vm --name $1 \
- --bootdev $2 \
- --image "$volume_path" \
- --diskbus sata \
- --arch x86_64 \
- --cpus $vcpus \
- --memory $ramsize \
- --libvirt-nic-driver virtio \
- --baremetal-interface $4
-}
-
-##Copy over the glance images and instackenv json file
-##params: none
-function configure_undercloud {
- local controller_nic_template compute_nic_template
- echo
- echo "Copying configuration files to Undercloud"
- if [[ "$net_isolation_enabled" == "TRUE" ]]; then
- echo -e "${blue}Network Environment set for Deployment: ${reset}"
- cat /tmp/network-environment.yaml
- scp ${SSH_OPTIONS[@]} /tmp/network-environment.yaml "stack@$UNDERCLOUD":
-
- # check for ODL L3/ONOS
- if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
- ext_net_type=br-ex
- fi
-
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- ovs_dpdk_bridge='br-phy'
- else
- ovs_dpdk_bridge=''
- fi
-
- if ! controller_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -i $net_isolation_enabled -t $CONFIG/nics-template.yaml.jinja2 -n "$enabled_network_list" -e "br-ex" -af $ip_addr_family); then
- echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
- exit 1
- fi
-
- if ! compute_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -i $net_isolation_enabled -t $CONFIG/nics-template.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family -d "$ovs_dpdk_bridge"); then
- echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
- exit 1
- fi
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-mkdir nics/
-cat > nics/controller.yaml << EOF
-$controller_nic_template
-EOF
-cat > nics/compute.yaml << EOF
-$compute_nic_template
-EOF
-EOI
- fi
-
- # ensure stack user on Undercloud machine has an ssh key
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
-
- if [ "$virtual" == "TRUE" ]; then
-
- # copy the Undercloud VM's stack user's pub key to
- # root's auth keys so that Undercloud can control
- # vm power on the hypervisor
- ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys
-
- DEPLOY_OPTIONS+=" --libvirt-type qemu"
- INSTACKENV=$CONFIG/instackenv-virt.json
-
- # upload instackenv file to Undercloud for virtual deployment
- scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json
- fi
-
- # allow stack to control power management on the hypervisor via sshkey
- # only if this is a virtual deployment
- if [ "$virtual" == "TRUE" ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-while read -r line; do
- stack_key=\${stack_key}\\\\\\\\n\${line}
-done < <(cat ~/.ssh/id_rsa)
-stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
-sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json
-EOI
- fi
-
- # copy stack's ssh key to this users authorized keys
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys
-
- # disable requiretty for sudo
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//'" /etc/sudoers
-
- # configure undercloud on Undercloud VM
- echo "Running undercloud configuration."
- echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-if [[ "$net_isolation_enabled" == "TRUE" ]]; then
- sed -i 's/#local_ip/local_ip/' undercloud.conf
- sed -i 's/#network_gateway/network_gateway/' undercloud.conf
- sed -i 's/#network_cidr/network_cidr/' undercloud.conf
- sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
- sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
- sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
- sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
-
- openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
- openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
- openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
- openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
- openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
- openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
- openstack-config --set undercloud.conf DEFAULT undercloud_debug false
-
-fi
-
-sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-
-# we assume that packages will not need to be updated with undercloud install
-# and that it will be used only to configure the undercloud
-# packages updates would need to be handled manually with yum update
-sudo cp -f /usr/share/diskimage-builder/elements/yum/bin/install-packages /usr/share/diskimage-builder/elements/yum/bin/install-packages.bak
-cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-packages > /dev/null
-#!/bin/sh
-exit 0
-EOF
-
-openstack undercloud install &> apex-undercloud-install.log || {
- # cat the undercloud install log incase it fails
- echo "ERROR: openstack undercloud install has failed. Dumping Log:"
- cat apex-undercloud-install.log
- exit 1
-}
-
-sleep 30
-sudo systemctl restart openstack-glance-api
-sudo systemctl restart openstack-nova-conductor
-sudo systemctl restart openstack-nova-compute
-
-sudo sed -i '/num_engine_workers/c\num_engine_workers = 2' /etc/heat/heat.conf
-sudo sed -i '/#workers\s=/c\workers = 2' /etc/heat/heat.conf
-sudo systemctl restart openstack-heat-engine
-sudo systemctl restart openstack-heat-api
-EOI
-
-# configure external network
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" << EOI
-if [[ "$public_network_vlan" != "native" ]]; then
- cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${public_network_vlan}
-DEVICE=vlan${public_network_vlan}
-ONBOOT=yes
-DEVICETYPE=ovs
-TYPE=OVSIntPort
-BOOTPROTO=static
-IPADDR=${public_network_provisioner_ip}
-PREFIX=${public_network_cidr##*/}
-OVS_BRIDGE=br-ctlplane
-OVS_OPTIONS="tag=${public_network_vlan}"
-EOF
- ifup vlan${public_network_vlan}
-else
- if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then
- ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2
- ip link set up dev eth2
- fi
-fi
-EOI
-
-# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
-# TODO: revisit and file a bug if necessary. This should eventually be removed
-# as well as glance api problem
-echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
-sleep 15
-
-}
-
-##preping it for deployment and launch the deploy
-##params: none
-function undercloud_prep_overcloud_deploy {
- if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
- if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_l3.yaml"
- elif [ "${deploy_options_array['sfc']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sfc.yaml"
- elif [ "${deploy_options_array['vpn']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sdnvpn.yaml"
- elif [ "${deploy_options_array['vpp']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_fdio.yaml"
- else
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml"
- fi
- SDN_IMAGE=opendaylight
- if [ "${deploy_options_array['sfc']}" == 'True' ]; then
- SDN_IMAGE+=-sfc
- if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
- echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute an SFC deployment."
- echo "Please install the opnfv-apex-opendaylight-sfc package to provide this overcloud image for deployment.${reset}"
- exit 1
- fi
- fi
- elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
- SDN_IMAGE=opendaylight
- elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml"
- SDN_IMAGE=onos
- elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
- echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
- exit 1
- elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
- echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
- SDN_IMAGE=opendaylight
- else
- echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
- echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, False, or null${reset}"
- exit 1
- fi
-
-
-
- # Make sure the correct overcloud image is available
- if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
- echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
- echo "Both ONOS and OpenDaylight are currently deployed from this image."
- echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
- exit 1
- fi
-
- echo "Copying overcloud image to Undercloud"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
- scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
-
- # Install ovs-dpdk inside the overcloud image if it is enabled.
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- # install dpdk packages before ovs
- echo -e "${blue}INFO: Enabling kernel modules for dpdk inside overcloud image${reset}"
-
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- cat << EOF > vfio_pci.modules
-#!/bin/bash
-exec /sbin/modprobe vfio_pci >/dev/null 2>&1
-EOF
-
- cat << EOF > uio_pci_generic.modules
-#!/bin/bash
-exec /sbin/modprobe uio_pci_generic >/dev/null 2>&1
-EOF
-
- LIBGUESTFS_BACKEND=direct virt-customize --upload vfio_pci.modules:/etc/sysconfig/modules/ \
- --upload uio_pci_generic.modules:/etc/sysconfig/modules/ \
- --run-command "chmod 0755 /etc/sysconfig/modules/vfio_pci.modules" \
- --run-command "chmod 0755 /etc/sysconfig/modules/uio_pci_generic.modules" \
- --run-command "yum install -y /root/dpdk_rpms/*" \
- -a overcloud-full.qcow2
-EOI
- elif [ "${deploy_options_array['dataplane']}" != 'ovs' ]; then
- echo "${red}${deploy_options_array['dataplane']} not supported${reset}"
- exit 1
- fi
-
- # Set ODL version accordingly
- if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['odl_version']}" == 'boron' ]]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
- --run-command "yum -y install /root/boron/*" \
- -a overcloud-full.qcow2
-EOI
- fi
-
- # Add performance deploy options if they have been set
- if [ ! -z "${deploy_options_array['performance']}" ]; then
-
- # Remove previous kernel args files per role
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Compute-kernel_params.txt"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Controller-kernel_params.txt"
-
- # Push performance options to subscript to modify per-role images as needed
- for option in "${performance_options[@]}" ; do
- echo -e "${blue}Setting performance option $option${reset}"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option"
- done
-
- # Build IPA kernel option ramdisks
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
-/bin/cp -f /home/stack/ironic-python-agent.initramfs /root/
-mkdir -p ipa/
-pushd ipa
-gunzip -c ../ironic-python-agent.initramfs | cpio -i
-if [ ! -f /home/stack/Compute-kernel_params.txt ]; then
- touch /home/stack/Compute-kernel_params.txt
- chown stack /home/stack/Compute-kernel_params.txt
-fi
-/bin/cp -f /home/stack/Compute-kernel_params.txt tmp/kernel_params.txt
-echo "Compute params set: "
-cat tmp/kernel_params.txt
-/bin/cp -f /root/image.py usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.py
-/bin/cp -f /root/image.pyc usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.pyc
-find . | cpio -o -H newc | gzip > /home/stack/Compute-ironic-python-agent.initramfs
-chown stack /home/stack/Compute-ironic-python-agent.initramfs
-if [ ! -f /home/stack/Controller-kernel_params.txt ]; then
- touch /home/stack/Controller-kernel_params.txt
- chown stack /home/stack/Controller-kernel_params.txt
-fi
-/bin/cp -f /home/stack/Controller-kernel_params.txt tmp/kernel_params.txt
-echo "Controller params set: "
-cat tmp/kernel_params.txt
-find . | cpio -o -H newc | gzip > /home/stack/Controller-ironic-python-agent.initramfs
-chown stack /home/stack/Controller-ironic-python-agent.initramfs
-popd
-/bin/rm -rf ipa/
-EOI
-
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
- fi
-
- # make sure ceph is installed
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
-
- # scale compute nodes according to inventory
- total_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/instackenv.json | grep -c memory")
-
- # check if HA is enabled
- if [[ "$ha_enabled" == "True" ]]; then
- DEPLOY_OPTIONS+=" --control-scale 3"
- compute_nodes=$((total_nodes - 3))
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
- else
- compute_nodes=$((total_nodes - 1))
- fi
-
- if [ "$compute_nodes" -le 0 ]; then
- echo -e "${red}ERROR: Invalid number of compute nodes: ${compute_nodes}. Check your inventory file.${reset}"
- exit 1
- else
- echo -e "${blue}INFO: Number of compute nodes set for deployment: ${compute_nodes}${reset}"
- DEPLOY_OPTIONS+=" --compute-scale ${compute_nodes}"
- fi
-
- if [[ "$net_isolation_enabled" == "TRUE" ]]; then
- #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
- DEPLOY_OPTIONS+=" -e network-environment.yaml"
- fi
-
- if [[ "$ha_enabled" == "True" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
- DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
- fi
-
- if [[ ! "$virtual" == "TRUE" ]]; then
- DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
- else
- DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
- fi
-
- DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"
-
- echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
-
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-if [ "$debug" == 'TRUE' ]; then
- LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full.qcow2 --root-password password:opnfvapex
-fi
-
-source stackrc
-set -o errexit
-echo "Uploading overcloud glance images"
-openstack overcloud image upload
-
-echo "Configuring undercloud and discovering nodes"
-openstack baremetal import --json instackenv.json
-openstack baremetal configure boot
-bash -x set_perf_images.sh ${performance_roles[@]}
-#if [[ -z "$virtual" ]]; then
-# openstack baremetal introspection bulk start
-#fi
-echo "Configuring flavors"
-for flavor in baremetal control compute; do
- echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
- if openstack flavor list | grep \${flavor}; then
- openstack flavor delete \${flavor}
- fi
- openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 \${flavor}
- if ! openstack flavor list | grep \${flavor}; then
- echo -e "${red}ERROR: Unable to create flavor \${flavor}${reset}"
- fi
-done
-openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
-openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="control" control
-openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="compute" compute
-echo "Configuring nameserver on ctlplane network"
-dns_server_ext=''
-for dns_server in ${dns_servers}; do
- dns_server_ext="\${dns_server_ext} --dns-nameserver \${dns_server}"
-done
-neutron subnet-update \$(neutron subnet-list | grep -Ev "id|tenant|external|storage" | grep -v \\\\-\\\\- | awk {'print \$2'}) \${dns_server_ext}
-echo "Executing overcloud deployment, this should run for an extended period without output."
-sleep 60 #wait for Hypervisor stats to check-in to nova
-# save deploy command so it can be used for debugging
-cat > deploy_command << EOF
-openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
-EOF
-EOI
-
- if [ "$interactive" == "TRUE" ]; then
- if ! prompt_user "Overcloud Deployment"; then
- echo -e "${blue}INFO: User requests exit${reset}"
- exit 0
- fi
- fi
-
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source stackrc
-openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
-if ! heat stack-list | grep CREATE_COMPLETE 1>/dev/null; then
- $(typeset -f debug_stack)
- debug_stack
- exit 1
-fi
-EOI
-
- # Configure DPDK
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI || (echo "DPDK config failed, exiting..."; exit 1)
-source stackrc
-set -o errexit
-for node in \$(nova list | grep novacompute | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
-echo "Running DPDK test app on \$node"
-ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-set -o errexit
-sudo dpdk_helloworld --no-pci
-sudo dpdk_nic_bind -s
-EOF
-done
-EOI
- fi
-
- if [ "$debug" == 'TRUE' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source overcloudrc
-echo "Keystone Endpoint List:"
-openstack endpoint list
-echo "Keystone Service List"
-openstack service list
-cinder quota-show \$(openstack project list | grep admin | awk {'print \$2'})
-EOI
- fi
-}
-
-##Post configuration after install
-##params: none
-function configure_post_install {
- local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip
- opnfv_attach_networks="admin_network public_network"
-
- echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
-
- echo -e "${blue}INFO: Configuring ssh for root to overcloud nodes...${reset}"
- # copy host key to instack
- scp ${SSH_OPTIONS[@]} /root/.ssh/id_rsa.pub "stack@$UNDERCLOUD":jumphost_id_rsa.pub
-
- # add host key to overcloud nodes authorized keys
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-source stackrc
-nodes=\$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-for node in \$nodes; do
-cat ~/jumphost_id_rsa.pub | ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" 'cat >> ~/.ssh/authorized_keys'
-done
-EOI
-
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- echo -e "${blue}INFO: Bringing up br-phy and ovs-agent for dpdk compute nodes...${reset}"
- compute_nodes=$(undercloud_connect stack "source stackrc; nova list | grep compute | wc -l")
- i=0
- while [ "$i" -lt "$compute_nodes" ]; do
- overcloud_connect compute${i} "sudo ifup br-phy; sudo systemctl restart neutron-openvswitch-agent"
- i=$((i + 1))
- done
- fi
-
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source overcloudrc
-set -o errexit
-echo "Configuring Neutron external network"
-neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }')
-neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
-
-echo "Removing sahara endpoint and service"
-sahara_service_id=\$(openstack service list | grep sahara | cut -d ' ' -f 2)
-sahara_endpoint_id=\$(openstack endpoint list | grep sahara | cut -d ' ' -f 2)
-openstack endpoint delete \$sahara_endpoint_id
-openstack service delete \$sahara_service_id
-
-echo "Removing swift endpoint and service"
-swift_service_id=\$(openstack service list | grep swift | cut -d ' ' -f 2)
-swift_endpoint_id=\$(openstack endpoint list | grep swift | cut -d ' ' -f 2)
-openstack endpoint delete \$swift_endpoint_id
-openstack service delete \$swift_service_id
-
-if [ "${deploy_options_array['congress']}" == 'True' ]; then
- for s in nova neutronv2 ceilometer cinder glancev2 keystone; do
- openstack congress datasource create \$s "\$s" \\
- --config username=\$OS_USERNAME \\
- --config tenant_name=\$OS_TENANT_NAME \\
- --config password=\$OS_PASSWORD \\
- --config auth_url=\$OS_AUTH_URL
- done
-fi
-EOI
-
- echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
- for network in ${opnfv_attach_networks}; do
- ovs_ip=$(find_ip ${NET_MAP[$network]})
- tmp_ip=''
- if [ -n "$ovs_ip" ]; then
- echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
- else
- echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
- # use last IP of allocation pool
- eval "ip_range=\${${network}_usable_ip_range}"
- ovs_ip=${ip_range##*,}
- eval "net_cidr=\${${network}_cidr}"
- sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
- sudo ip link set up ${NET_MAP[$network]}
- tmp_ip=$(find_ip ${NET_MAP[$network]})
- if [ -n "$tmp_ip" ]; then
- echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
- continue
- else
- echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"
- return 1
- fi
- fi
- done
-
- # for virtual, we NAT public network through Undercloud
- if [ "$virtual" == "TRUE" ]; then
- if ! configure_undercloud_nat ${public_network_cidr}; then
- echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${public_network_cidr}${reset}"
- exit 1
- else
- echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud public network${reset}"
- fi
- fi
-
- # for sfc deployments we need the vxlan workaround
- if [ "${deploy_options_array['sfc']}" == 'True' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source stackrc
-set -o errexit
-for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
-ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-sudo ifconfig br-int up
-sudo ip route add 123.123.123.0/24 dev br-int
-EOF
-done
-EOI
- fi
-
- # Collect deployment logs
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-mkdir -p ~/deploy_logs
-rm -rf deploy_logs/*
-source stackrc
-set -o errexit
-for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
- ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
- sudo cp /var/log/messages /home/heat-admin/messages.log
- sudo chown heat-admin /home/heat-admin/messages.log
-EOF
-scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
-if [ "$debug" == "TRUE" ]; then
- nova list --ip \$node
- echo "---------------------------"
- echo "-----/var/log/messages-----"
- echo "---------------------------"
- cat ~/deploy_logs/\$node.messages.log
- echo "---------------------------"
- echo "----------END LOG----------"
- echo "---------------------------"
-fi
- ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
- sudo rm -f /home/heat-admin/messages.log
-EOF
-done
-
-# Print out the undercloud IP and dashboard URL
-source stackrc
-echo "Undercloud IP: $UNDERCLOUD, please connect by doing 'opnfv-util undercloud'"
-echo "Overcloud dashboard available at http://\$(heat output-show overcloud PublicVip | sed 's/"//g')/dashboard"
-EOI
-
-}
-
display_usage() {
echo -e "Usage:\n$0 [arguments] \n"
echo -e " -d|--deploy-settings : Full path to deploy settings yaml file. Optional. Defaults to null"
@@ -1195,8 +209,6 @@ parse_cmdline() {
}
-##END FUNCTIONS
-
main() {
parse_cmdline "$@"
echo -e "${blue}INFO: Parsing network settings file...${reset}"
@@ -1216,7 +228,7 @@ main() {
parse_inventory_file
fi
configure_undercloud
- undercloud_prep_overcloud_deploy
+ overcloud_deploy
if [ "$post_config" == "TRUE" ]; then
if ! configure_post_install; then
echo -e "${red}ERROR:Post Install Configuration Failed, Exiting.${reset}"
diff --git a/ci/util.sh b/ci/util.sh
index f55e1e0f..ad7f0788 100755
--- a/ci/util.sh
+++ b/ci/util.sh
@@ -21,13 +21,19 @@ resolve_cmd() {
}
display_usage() {
- echo -e "Usage:\n$0 [arguments] \n"
- echo -e " undercloud <user> <command> : Connect to Undercloud VM as <user> and execute command <command>\n"
- echo -e " <user> Optional: Defaults to 'stack', <command> Optional: Defaults to none\n"
- echo -e " overcloud <node> <command> : Connect to an Overcloud <node> and execute command <command>\n"
- echo -e " <node> Required in format controller|compute<number>. Example: controller0\n"
- echo -e " <command> Optional: Defaults to none\n"
- echo -e " debug-stack : Print parsed deployment failures to stdout \n"
+ echo -e "Usage:\n$0 subcommand [ arguments ]\n"
+ echo -e "Arguments:\n"
+ echo -e " undercloud [ user [ command ] ] Connect to Undercloud VM as user and optionally execute a command\n"
+ echo -e " user Optional: Defaults to 'stack'\n"
+ echo -e " command Optional: Defaults to none\n"
+ echo -e ""
+ echo -e " overcloud [ node [ command ] ] Connect to an Overcloud node and optionally execute a command\n"
+ echo -e " node Required: in format controller|compute<number>. Example: controller0\n"
+ echo -e " command Optional: Defaults to none\n"
+ echo -e ""
+ echo -e " debug-stack Print parsed deployment failures to stdout \n"
+ echo -e ""
+ echo -e " mock-detached on | off Add firewall rules to the jump host to mock a detached deployment \n"
}
##translates the command line argument
@@ -77,6 +83,20 @@ parse_cmdline() {
undercloud_connect stack "$(typeset -f debug_stack); debug_stack"
exit 0
;;
+ mock-detached)
+ if [ "$2" == "on" ]; then
+ echo "Blocking output http and https traffic"
+ iptables -A OUTPUT -p tcp --dport 80 -j REJECT
+ iptables -A OUTPUT -p tcp --dport 443 -j REJECT
+ elif [ "$2" == "off" ]; then
+ echo "Allowing output http and https traffic"
+ iptables -D OUTPUT -p tcp --dport 80 -j REJECT
+ iptables -D OUTPUT -p tcp --dport 443 -j REJECT
+ else
+ display_usage
+ fi
+ exit 0
+ ;;
*)
echo -e "\n\nThis script is used to interact with Apex deployments\n\n"
echo "Use -h to display help"
diff --git a/config/deploy/os-odl_l2-sfc-noha.yaml b/config/deploy/os-odl_l2-sfc-noha.yaml
index 6d088af8..106fbca9 100644
--- a/config/deploy/os-odl_l2-sfc-noha.yaml
+++ b/config/deploy/os-odl_l2-sfc-noha.yaml
@@ -4,6 +4,7 @@ global_params:
deploy_options:
sdn_controller: opendaylight
sdn_l3: false
+ odl_version: boron
tacker: false
congress: false
sfc: true
diff --git a/config/deploy/os-onos-sfc-ha.yaml b/config/deploy/os-onos-sfc-ha.yaml
new file mode 100644
index 00000000..a81023db
--- /dev/null
+++ b/config/deploy/os-onos-sfc-ha.yaml
@@ -0,0 +1,10 @@
+global_params:
+ ha_enabled: true
+
+deploy_options:
+ sdn_controller: onos
+ sdn_l3: true
+ tacker: false
+ congress: false
+ sfc: true
+ vpn: false
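Note: the new scenario file is selected at deploy time through the -d/--deploy-settings argument shown in the deploy script's usage text above. An illustrative invocation, assuming the deploy script lives at ci/deploy.sh and the yaml is used straight from the source tree:

    ./ci/deploy.sh -d config/deploy/os-onos-sfc-ha.yaml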
diff --git a/lib/common-functions.sh b/lib/common-functions.sh
index 365f8e3f..2ace9970 100644
--- a/lib/common-functions.sh
+++ b/lib/common-functions.sh
@@ -257,3 +257,33 @@ function prompt_user {
fi
done
}
+
+##checks if prefix exists in string
+##params: string, prefix
+##usage: contains_prefix "deploy_setting_launcher=1" "deploy_setting"
+contains_prefix() {
+ local mystr=$1
+ local prefix=$2
+ if echo $mystr | grep -E "^$prefix.*$" > /dev/null; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+##verify internet connectivity
+##params: none
+function verify_internet {
+ if ping -c 2 $ping_site > /dev/null; then
+ if ping -c 2 www.google.com > /dev/null; then
+ echo "${blue}Internet connectivity detected${reset}"
+ return 0
+ else
+ echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
+ return 1
+ fi
+ else
+ echo "${red}No internet connectivity detected${reset}"
+ return 1
+ fi
+}
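Note: both helpers return 0/1 rather than printing a result, so callers branch on them directly. A minimal sketch of a hypothetical caller, assuming ping_site and the color variables (blue, red, reset) have already been set by the deploy script:

    if verify_internet; then
      internet=true
    else
      internet=false
    fi
    if contains_prefix "$line" "deploy_setting"; then
      echo "found a deploy_setting override: $line"
    fi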
diff --git a/lib/configure-deps-functions.sh b/lib/configure-deps-functions.sh
new file mode 100755
index 00000000..06a4c72c
--- /dev/null
+++ b/lib/configure-deps-functions.sh
@@ -0,0 +1,144 @@
+#!/usr/bin/env bash
+##############################################################################
+# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+##download dependencies if missing and configure host
+#params: none
+function configure_deps {
+ if ! verify_internet; then
+ echo "${red}Will not download dependencies${reset}"
+ internet=false
+ fi
+
+ # verify ip forwarding
+ if sysctl net.ipv4.ip_forward | grep 0; then
+ sudo sysctl -w net.ipv4.ip_forward=1
+ sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"
+ fi
+
+ # ensure no dhcp server is running on jumphost
+ if ! sudo systemctl status dhcpd | grep dead; then
+ echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
+ sudo systemctl stop dhcpd
+ sudo systemctl disable dhcpd
+ fi
+
+ # ensure networks are configured
+ systemctl status libvirtd || systemctl start libvirtd
+ systemctl status openvswitch || systemctl start openvswitch
+
+ # If flat we only use admin network
+ if [[ "$net_isolation_enabled" == "FALSE" ]]; then
+ virsh_enabled_networks="admin_network"
+ enabled_network_list="admin_network"
+ # For baremetal we only need to create/attach Undercloud to admin and public
+ elif [ "$virtual" == "FALSE" ]; then
+ virsh_enabled_networks="admin_network public_network"
+ else
+ virsh_enabled_networks=$enabled_network_list
+ fi
+
+ # ensure default network is configured correctly
+ libvirt_dir="/usr/share/libvirt/networks"
+ virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml
+ virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default
+ virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default
+
+ if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
+ for network in ${enabled_network_list}; do
+ echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
+ ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
+ virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
+<network>
+ <name>$network</name>
+ <forward mode='bridge'/>
+ <bridge name='${NET_MAP[$network]}'/>
+ <virtualport type='openvswitch'/>
+</network>
+EOF
+ if ! (virsh net-list --all | grep $network > /dev/null); then
+ echo "${red}ERROR: unable to create network: ${network}${reset}"
+ exit 1;
+ fi
+ rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
+ virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
+ virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
+ done
+
+ echo -e "${blue}INFO: Bridges set: ${reset}"
+ ovs-vsctl list-br
+
+ # bridge interfaces to correct OVS instances for baremetal deployment
+ for network in ${enabled_network_list}; do
+ if [[ "$network" != "admin_network" && "$network" != "public_network" ]]; then
+ continue
+ fi
+ this_interface=$(eval echo \${${network}_bridged_interface})
+      # check if this is a bridged interface for this network
+      if [[ ! -z "$this_interface" && "$this_interface" != "none" ]]; then
+ if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
+ echo -e "${red}ERROR: Unable to bridge interface ${this_interface} to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
+ exit 1
+ else
+ echo -e "${blue}INFO: Interface ${this_interface} bridged to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
+ fi
+ else
+ echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"
+ exit 1
+ fi
+ done
+ else
+ for network in ${OPNFV_NETWORK_TYPES}; do
+ echo "${blue}INFO: Creating Virsh Network: $network${reset}"
+ virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
+<network ipv6='yes'>
+<name>$network</name>
+<bridge name='${NET_MAP[$network]}'/>
+</network>
+EOF
+ if ! (virsh net-list --all | grep $network > /dev/null); then
+ echo "${red}ERROR: unable to create network: ${network}${reset}"
+ exit 1;
+ fi
+ rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
+ virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
+ virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
+ done
+
+ echo -e "${blue}INFO: Bridges set: ${reset}"
+ brctl show
+ fi
+
+ echo -e "${blue}INFO: virsh networks set: ${reset}"
+ virsh net-list
+
+ # ensure storage pool exists and is started
+ virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images
+ virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default)
+
+ if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
+ echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \
+Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
+ fi
+
+ if ! lsmod | grep kvm > /dev/null; then modprobe kvm; fi
+ if ! lsmod | grep kvm_intel > /dev/null; then modprobe kvm_intel; fi
+
+ if ! lsmod | grep kvm > /dev/null; then
+ echo "${red}kvm kernel modules not loaded!${reset}"
+ return 1
+ fi
+
+  ##generate an ssh key for root if one does not exist
+ if [ ! -e ~/.ssh/id_rsa.pub ]; then
+ ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+ fi
+
+ echo "${blue}All dependencies installed and running${reset}"
+}
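Note: configure_deps is written to be re-runnable; every libvirt network, OVS bridge, and storage pool is checked before it is created or started. The same check-then-create idiom, shown standalone for one network (br-admin is an assumed NET_MAP value for admin_network, used purely for illustration):

    ovs-vsctl list-br | grep -q "^br-admin$" || ovs-vsctl add-br br-admin
    virsh net-list --all | grep -q admin_network || virsh net-define /tmp/admin_network.xml
    virsh net-list | grep -Eq "admin_network\s+active" || virsh net-start admin_network
    virsh net-list | grep -Eq "admin_network\s+active\s+yes" || virsh net-autostart --network admin_network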
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
new file mode 100755
index 00000000..992d22af
--- /dev/null
+++ b/lib/overcloud-deploy-functions.sh
@@ -0,0 +1,284 @@
+#!/usr/bin/env bash
+##############################################################################
+# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+##preps the overcloud image for deployment and launches the deploy
+##params: none
+function overcloud_deploy {
+ if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
+ if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_l3.yaml"
+ elif [ "${deploy_options_array['sfc']}" == 'True' ]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sfc.yaml"
+ elif [ "${deploy_options_array['vpn']}" == 'True' ]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sdnvpn.yaml"
+ elif [ "${deploy_options_array['vpp']}" == 'True' ]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_fdio.yaml"
+ else
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml"
+ fi
+ SDN_IMAGE=opendaylight
+ if [ "${deploy_options_array['sfc']}" == 'True' ]; then
+ SDN_IMAGE+=-sfc
+ if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
+ echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute an SFC deployment."
+ echo "Please install the opnfv-apex-opendaylight-sfc package to provide this overcloud image for deployment.${reset}"
+ exit 1
+ fi
+ fi
+ elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
+ SDN_IMAGE=opendaylight
+ elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then
+ if [ "${deploy_options_array['sfc']}" == 'True' ]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos_sfc.yaml"
+ else
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml"
+ fi
+ SDN_IMAGE=onos
+ elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
+ echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
+ exit 1
+ elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
+ echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
+ SDN_IMAGE=opendaylight
+ else
+ echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
+ echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, False, or null${reset}"
+ exit 1
+ fi
+
+
+
+ # Make sure the correct overcloud image is available
+ if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
+ echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
+ echo "Both ONOS and OpenDaylight are currently deployed from this image."
+ echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
+ exit 1
+ fi
+
+ echo "Copying overcloud image to Undercloud"
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
+ scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
+
+ # Install ovs-dpdk inside the overcloud image if it is enabled.
+ if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
+ # install dpdk packages before ovs
+ echo -e "${blue}INFO: Enabling kernel modules for dpdk inside overcloud image${reset}"
+
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ cat << EOF > vfio_pci.modules
+#!/bin/bash
+exec /sbin/modprobe vfio_pci >/dev/null 2>&1
+EOF
+
+ cat << EOF > uio_pci_generic.modules
+#!/bin/bash
+exec /sbin/modprobe uio_pci_generic >/dev/null 2>&1
+EOF
+
+ LIBGUESTFS_BACKEND=direct virt-customize --upload vfio_pci.modules:/etc/sysconfig/modules/ \
+ --upload uio_pci_generic.modules:/etc/sysconfig/modules/ \
+ --run-command "chmod 0755 /etc/sysconfig/modules/vfio_pci.modules" \
+ --run-command "chmod 0755 /etc/sysconfig/modules/uio_pci_generic.modules" \
+ --run-command "yum install -y /root/dpdk_rpms/*" \
+ -a overcloud-full.qcow2
+EOI
+ elif [ "${deploy_options_array['dataplane']}" != 'ovs' ]; then
+ echo "${red}${deploy_options_array['dataplane']} not supported${reset}"
+ exit 1
+ fi
+
+ # Set ODL version accordingly
+ if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['odl_version']}" == 'boron' ]]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
+ --run-command "yum -y install /root/boron/*" \
+ -a overcloud-full.qcow2
+EOI
+ fi
+
+ # Add performance deploy options if they have been set
+ if [ ! -z "${deploy_options_array['performance']}" ]; then
+
+ # Remove previous kernel args files per role
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Compute-kernel_params.txt"
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Controller-kernel_params.txt"
+
+ # Push performance options to subscript to modify per-role images as needed
+ for option in "${performance_options[@]}" ; do
+ echo -e "${blue}Setting performance option $option${reset}"
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option"
+ done
+
+ # Build IPA kernel option ramdisks
+ ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
+/bin/cp -f /home/stack/ironic-python-agent.initramfs /root/
+mkdir -p ipa/
+pushd ipa
+gunzip -c ../ironic-python-agent.initramfs | cpio -i
+if [ ! -f /home/stack/Compute-kernel_params.txt ]; then
+ touch /home/stack/Compute-kernel_params.txt
+ chown stack /home/stack/Compute-kernel_params.txt
+fi
+/bin/cp -f /home/stack/Compute-kernel_params.txt tmp/kernel_params.txt
+echo "Compute params set: "
+cat tmp/kernel_params.txt
+/bin/cp -f /root/image.py usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.py
+/bin/cp -f /root/image.pyc usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.pyc
+find . | cpio -o -H newc | gzip > /home/stack/Compute-ironic-python-agent.initramfs
+chown stack /home/stack/Compute-ironic-python-agent.initramfs
+if [ ! -f /home/stack/Controller-kernel_params.txt ]; then
+ touch /home/stack/Controller-kernel_params.txt
+ chown stack /home/stack/Controller-kernel_params.txt
+fi
+/bin/cp -f /home/stack/Controller-kernel_params.txt tmp/kernel_params.txt
+echo "Controller params set: "
+cat tmp/kernel_params.txt
+find . | cpio -o -H newc | gzip > /home/stack/Controller-ironic-python-agent.initramfs
+chown stack /home/stack/Controller-ironic-python-agent.initramfs
+popd
+/bin/rm -rf ipa/
+EOI
+
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
+ fi
+
+ # make sure ceph is installed
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
+
+ # scale compute nodes according to inventory
+ total_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/instackenv.json | grep -c memory")
+
+ # check if HA is enabled
+ if [[ "$ha_enabled" == "True" ]]; then
+ DEPLOY_OPTIONS+=" --control-scale 3"
+ compute_nodes=$((total_nodes - 3))
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
+ else
+ compute_nodes=$((total_nodes - 1))
+ fi
+
+ if [ "$compute_nodes" -le 0 ]; then
+ echo -e "${red}ERROR: Invalid number of compute nodes: ${compute_nodes}. Check your inventory file.${reset}"
+ exit 1
+ else
+ echo -e "${blue}INFO: Number of compute nodes set for deployment: ${compute_nodes}${reset}"
+ DEPLOY_OPTIONS+=" --compute-scale ${compute_nodes}"
+ fi
+
+ if [[ "$net_isolation_enabled" == "TRUE" ]]; then
+ #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
+ DEPLOY_OPTIONS+=" -e network-environment.yaml"
+ fi
+
+ if [[ "$ha_enabled" == "True" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
+ DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
+ fi
+
+ if [[ ! "$virtual" == "TRUE" ]]; then
+ DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
+ else
+ DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
+ fi
+
+ DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"
+
+ echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
+
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+if [ "$debug" == 'TRUE' ]; then
+ LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full.qcow2 --root-password password:opnfvapex
+fi
+
+source stackrc
+set -o errexit
+echo "Uploading overcloud glance images"
+openstack overcloud image upload
+
+echo "Configuring undercloud and discovering nodes"
+openstack baremetal import --json instackenv.json
+openstack baremetal configure boot
+bash -x set_perf_images.sh ${performance_roles[@]}
+#if [[ -z "$virtual" ]]; then
+# openstack baremetal introspection bulk start
+#fi
+echo "Configuring flavors"
+for flavor in baremetal control compute; do
+ echo -e "${blue}INFO: Updating flavor: \${flavor}${reset}"
+ if openstack flavor list | grep \${flavor}; then
+ openstack flavor delete \${flavor}
+ fi
+ openstack flavor create --id auto --ram 4096 --disk 39 --vcpus 1 \${flavor}
+ if ! openstack flavor list | grep \${flavor}; then
+ echo -e "${red}ERROR: Unable to create flavor \${flavor}${reset}"
+ fi
+done
+openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
+openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="control" control
+openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="compute" compute
+echo "Configuring nameserver on ctlplane network"
+dns_server_ext=''
+for dns_server in ${dns_servers}; do
+ dns_server_ext="\${dns_server_ext} --dns-nameserver \${dns_server}"
+done
+neutron subnet-update \$(neutron subnet-list | grep -Ev "id|tenant|external|storage" | grep -v \\\\-\\\\- | awk {'print \$2'}) \${dns_server_ext}
+echo "Executing overcloud deployment, this should run for an extended period without output."
+sleep 60 #wait for Hypervisor stats to check-in to nova
+# save deploy command so it can be used for debugging
+cat > deploy_command << EOF
+openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
+EOF
+EOI
+
+ if [ "$interactive" == "TRUE" ]; then
+ if ! prompt_user "Overcloud Deployment"; then
+ echo -e "${blue}INFO: User requests exit${reset}"
+ exit 0
+ fi
+ fi
+
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+source stackrc
+openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
+if ! heat stack-list | grep CREATE_COMPLETE 1>/dev/null; then
+ $(typeset -f debug_stack)
+ debug_stack
+ exit 1
+fi
+EOI
+
+ # Configure DPDK
+ if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI || (echo "DPDK config failed, exiting..."; exit 1)
+source stackrc
+set -o errexit
+for node in \$(nova list | grep novacompute | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
+echo "Running DPDK test app on \$node"
+ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
+set -o errexit
+sudo dpdk_helloworld --no-pci
+sudo dpdk_nic_bind -s
+EOF
+done
+EOI
+ fi
+
+ if [ "$debug" == 'TRUE' ]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+source overcloudrc
+echo "Keystone Endpoint List:"
+openstack endpoint list
+echo "Keystone Service List"
+openstack service list
+cinder quota-show \$(openstack project list | grep admin | awk {'print \$2'})
+EOI
+ fi
+}
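Note: the function builds the deploy command incrementally in DEPLOY_OPTIONS before handing it to 'openstack overcloud deploy'. Purely as an illustration (the NTP server and compute count are assumed values, not defaults from this change), an HA ODL L3 run ends up with roughly:

    DEPLOY_OPTIONS=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_l3.yaml"
    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
    DEPLOY_OPTIONS+=" --control-scale 3 -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
    DEPLOY_OPTIONS+=" --compute-scale 2 -e network-environment.yaml --ntp-server pool.ntp.org"
    DEPLOY_OPTIONS+=" -e opnfv-environment.yaml"
    openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90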
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
new file mode 100755
index 00000000..912a2a11
--- /dev/null
+++ b/lib/post-install-functions.sh
@@ -0,0 +1,162 @@
+#!/usr/bin/env bash
+##############################################################################
+# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+##Post configuration after install
+##params: none
+function configure_post_install {
+ local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip
+ opnfv_attach_networks="admin_network public_network"
+
+ echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
+
+ echo -e "${blue}INFO: Configuring ssh for root to overcloud nodes...${reset}"
+ # copy host key to instack
+ scp ${SSH_OPTIONS[@]} /root/.ssh/id_rsa.pub "stack@$UNDERCLOUD":jumphost_id_rsa.pub
+
+ # add host key to overcloud nodes authorized keys
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
+source stackrc
+nodes=\$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+for node in \$nodes; do
+cat ~/jumphost_id_rsa.pub | ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" 'cat >> ~/.ssh/authorized_keys'
+done
+EOI
+
+ echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
+ for network in ${opnfv_attach_networks}; do
+ ovs_ip=$(find_ip ${NET_MAP[$network]})
+ tmp_ip=''
+ if [ -n "$ovs_ip" ]; then
+ echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
+ else
+ echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
+ # use last IP of allocation pool
+ eval "ip_range=\${${network}_usable_ip_range}"
+ ovs_ip=${ip_range##*,}
+ eval "net_cidr=\${${network}_cidr}"
+ sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
+ sudo ip link set up ${NET_MAP[$network]}
+ tmp_ip=$(find_ip ${NET_MAP[$network]})
+ if [ -n "$tmp_ip" ]; then
+ echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
+ continue
+ else
+ echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"
+ return 1
+ fi
+ fi
+ done
+
+ if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
+ echo -e "${blue}INFO: Bringing up br-phy and ovs-agent for dpdk compute nodes...${reset}"
+ compute_nodes=$(undercloud_connect stack "source stackrc; nova list | grep compute | wc -l")
+ i=0
+ while [ "$i" -lt "$compute_nodes" ]; do
+ overcloud_connect compute${i} "sudo ifup br-phy; sudo systemctl restart neutron-openvswitch-agent"
+ i=$((i + 1))
+ done
+ fi
+
+ # TODO fix this when HA SDN controllers are supported
+ if [ "${deploy_options_array['sdn_controller']}" != 'False' ]; then
+ echo -e "${blue}INFO: Finding SDN Controller IP for overcloudrc...${reset}"
+ sdn_controller_ip=$(overcloud_connect controller0 "facter ipaddress_br_ex")
+ echo -e "${blue}INFO: SDN Controller IP is ${sdn_controller_ip} ${reset}"
+ undercloud_connect stack "echo 'export SDN_CONTROLLER_IP=${sdn_controller_ip}' >> /home/stack/overcloudrc"
+ fi
+
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+source overcloudrc
+set -o errexit
+echo "Configuring Neutron external network"
+neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }')
+neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
+
+echo "Removing sahara endpoint and service"
+sahara_service_id=\$(openstack service list | grep sahara | cut -d ' ' -f 2)
+sahara_endpoint_id=\$(openstack endpoint list | grep sahara | cut -d ' ' -f 2)
+openstack endpoint delete \$sahara_endpoint_id
+openstack service delete \$sahara_service_id
+
+echo "Removing swift endpoint and service"
+swift_service_id=\$(openstack service list | grep swift | cut -d ' ' -f 2)
+swift_endpoint_id=\$(openstack endpoint list | grep swift | cut -d ' ' -f 2)
+openstack endpoint delete \$swift_endpoint_id
+openstack service delete \$swift_service_id
+
+if [ "${deploy_options_array['congress']}" == 'True' ]; then
+ for s in nova neutronv2 ceilometer cinder glancev2 keystone; do
+ openstack congress datasource create \$s "\$s" \\
+ --config username=\$OS_USERNAME \\
+ --config tenant_name=\$OS_TENANT_NAME \\
+ --config password=\$OS_PASSWORD \\
+ --config auth_url=\$OS_AUTH_URL
+ done
+ openstack congress datasource create doctor "doctor"
+fi
+EOI
+
+ # for virtual, we NAT public network through Undercloud
+ if [ "$virtual" == "TRUE" ]; then
+ if ! configure_undercloud_nat ${public_network_cidr}; then
+ echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${public_network_cidr}${reset}"
+ exit 1
+ else
+ echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud public network${reset}"
+ fi
+ fi
+
+ # for sfc deployments we need the vxlan workaround
+ if [ "${deploy_options_array['sfc']}" == 'True' ]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+source stackrc
+set -o errexit
+for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
+ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
+sudo ifconfig br-int up
+sudo ip route add 123.123.123.0/24 dev br-int
+EOF
+done
+EOI
+ fi
+
+ # Collect deployment logs
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+mkdir -p ~/deploy_logs
+rm -rf deploy_logs/*
+source stackrc
+set -o errexit
+for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
+ ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
+ sudo cp /var/log/messages /home/heat-admin/messages.log
+ sudo chown heat-admin /home/heat-admin/messages.log
+EOF
+scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
+if [ "$debug" == "TRUE" ]; then
+ nova list --ip \$node
+ echo "---------------------------"
+ echo "-----/var/log/messages-----"
+ echo "---------------------------"
+ cat ~/deploy_logs/\$node.messages.log
+ echo "---------------------------"
+ echo "----------END LOG----------"
+ echo "---------------------------"
+fi
+ ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
+ sudo rm -f /home/heat-admin/messages.log
+EOF
+done
+
+# Print out the undercloud IP and dashboard URL
+source stackrc
+echo "Undercloud IP: $UNDERCLOUD, please connect by doing 'opnfv-util undercloud'"
+echo "Overcloud dashboard available at http://\$(heat output-show overcloud PublicVip | sed 's/"//g')/dashboard"
+EOI
+}
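Note: when an attached OVS bridge comes up without an address, the loop above assigns it the last IP of that network's usable range. Expanded for the public network with assumed example values from network_settings.yaml (br-public standing in for NET_MAP[public_network]):

    ovs_ip=192.168.37.250            # last address of public_network_usable_ip_range
    sudo ip addr add ${ovs_ip}/24 dev br-public
    sudo ip link set up br-public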
diff --git a/lib/python/apex/network_environment.py b/lib/python/apex/network_environment.py
index fec6299d..bf4bdc79 100644
--- a/lib/python/apex/network_environment.py
+++ b/lib/python/apex/network_environment.py
@@ -9,11 +9,13 @@
import yaml
import re
-from .common.constants import ADMIN_NETWORK
-from .common.constants import PRIVATE_NETWORK
-from .common.constants import STORAGE_NETWORK
-from .common.constants import PUBLIC_NETWORK
-from .common.constants import API_NETWORK
+from .common.constants import (
+ ADMIN_NETWORK,
+ PRIVATE_NETWORK,
+ STORAGE_NETWORK,
+ PUBLIC_NETWORK,
+ API_NETWORK,
+)
PORTS = '/ports'
# Resources defined by <resource name>: <prefix>
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
new file mode 100755
index 00000000..f829e980
--- /dev/null
+++ b/lib/undercloud-functions.sh
@@ -0,0 +1,277 @@
+#!/usr/bin/env bash
+##############################################################################
+# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+##verify vm exists, and has a dhcp lease assigned to it
+##params: none
+function setup_undercloud_vm {
+ if ! virsh list --all | grep undercloud > /dev/null; then
+ undercloud_nets="default admin_network"
+ if [[ $enabled_network_list =~ "public_network" ]]; then
+ undercloud_nets+=" public_network"
+ fi
+ define_vm undercloud hd 30 "$undercloud_nets" 4 12288
+
+  ### this doesn't work for some reason; hangup events were seen, so cp is used instead
+ #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2
+ #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
+ #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
+ #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
+ #error: cannot close volume undercloud.qcow2
+ #error: internal error: received hangup / error event on socket
+ #error: Reconnected to the hypervisor
+
+ local undercloud_dst=/var/lib/libvirt/images/undercloud.qcow2
+ cp -f $RESOURCES/undercloud.qcow2 $undercloud_dst
+
+ # resize Undercloud machine
+ echo "Checking if Undercloud needs to be resized..."
+ undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
+ if [ "$undercloud_size" -lt 30 ]; then
+ qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
+ LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $undercloud_dst
+ LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
+ new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $undercloud_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
+ if [ "$new_size" -lt 30 ]; then
+ echo "Error resizing Undercloud machine, disk size is ${new_size}"
+ exit 1
+ else
+ echo "Undercloud successfully resized"
+ fi
+ else
+ echo "Skipped Undercloud resize, upstream is large enough"
+ fi
+
+ else
+ echo "Found existing Undercloud VM, exiting."
+ exit 1
+ fi
+
+ # if the VM is not running update the authkeys and start it
+ if ! virsh list | grep undercloud > /dev/null; then
+ echo "Injecting ssh key to Undercloud VM"
+ LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
+ --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
+ --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
+ --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
+ --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
+ virsh start undercloud
+ fi
+
+ sleep 10 # let undercloud get started up
+
+ # get the undercloud VM IP
+ CNT=10
+ echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
+ undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
+ while ! $(arp -e | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
+ echo -n "."
+ sleep 10
+ CNT=$((CNT-1))
+ done
+ UNDERCLOUD=$(arp -e | grep ${undercloud_mac} | awk {'print $1'})
+
+ if [ -z "$UNDERCLOUD" ]; then
+ echo "\n\nCan't get IP for Undercloud. Can Not Continue."
+ exit 1
+ else
+ echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"
+ fi
+
+ CNT=10
+ echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
+ while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
+ echo -n "."
+ sleep 3
+ CNT=$((CNT-1))
+ done
+ if [ "$CNT" -eq 0 ]; then
+ echo "Failed to contact Undercloud. Can Not Continue"
+ exit 1
+ fi
+ CNT=10
+ while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
+ echo -n "."
+ sleep 3
+ CNT=$((CNT-1))
+ done
+ if [ "$CNT" -eq 0 ]; then
+ echo "Failed to connect to Undercloud. Can Not Continue"
+ exit 1
+ fi
+
+ # extra space to overwrite the previous connectivity output
+ echo -e "${blue}\r ${reset}"
+ sleep 1
+
+ # ssh key fix for stack user
+ ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
+}
+
+##Copy configuration files to the Undercloud VM and run the undercloud install
+##params: none
+function configure_undercloud {
+ local controller_nic_template compute_nic_template
+ echo
+ echo "Copying configuration files to Undercloud"
+ if [[ "$net_isolation_enabled" == "TRUE" ]]; then
+ echo -e "${blue}Network Environment set for Deployment: ${reset}"
+ cat /tmp/network-environment.yaml
+ scp ${SSH_OPTIONS[@]} /tmp/network-environment.yaml "stack@$UNDERCLOUD":
+
+ # check for ODL L3/ONOS
+ if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
+ ext_net_type=br-ex
+ fi
+
+ if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
+ ovs_dpdk_bridge='br-phy'
+ else
+ ovs_dpdk_bridge=''
+ fi
+
+ if ! controller_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -i $net_isolation_enabled -t $CONFIG/nics-template.yaml.jinja2 -n "$enabled_network_list" -e "br-ex" -af $ip_addr_family); then
+ echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
+ exit 1
+ fi
+
+ if ! compute_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -i $net_isolation_enabled -t $CONFIG/nics-template.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family -d "$ovs_dpdk_bridge"); then
+ echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
+ exit 1
+ fi
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
+mkdir nics/
+cat > nics/controller.yaml << EOF
+$controller_nic_template
+EOF
+cat > nics/compute.yaml << EOF
+$compute_nic_template
+EOF
+EOI
+ fi
+
+ # ensure stack user on Undercloud machine has an ssh key
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
+
+ if [ "$virtual" == "TRUE" ]; then
+
+ # copy the Undercloud VM's stack user's pub key to
+ # root's auth keys so that Undercloud can control
+ # vm power on the hypervisor
+ ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys
+
+ DEPLOY_OPTIONS+=" --libvirt-type qemu"
+ INSTACKENV=$CONFIG/instackenv-virt.json
+
+ # upload instackenv file to Undercloud for virtual deployment
+ scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json
+ fi
+
+ # allow stack to control power management on the hypervisor via sshkey
+ # only if this is a virtual deployment
+ if [ "$virtual" == "TRUE" ]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+while read -r line; do
+ stack_key=\${stack_key}\\\\\\\\n\${line}
+done < <(cat ~/.ssh/id_rsa)
+stack_key=\$(echo \$stack_key | sed 's/\\\\\\\\n//')
+sed -i 's~INSERT_STACK_USER_PRIV_KEY~'"\$stack_key"'~' instackenv.json
+EOI
+ fi
+
+ # copy stack's ssh key to this users authorized keys
+ ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> ~/.ssh/authorized_keys
+
+ # disable requiretty for sudo
+ ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//'" /etc/sudoers
+
+ # configure undercloud on Undercloud VM
+ echo "Running undercloud configuration."
+ echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
+if [[ "$net_isolation_enabled" == "TRUE" ]]; then
+ sed -i 's/#local_ip/local_ip/' undercloud.conf
+ sed -i 's/#network_gateway/network_gateway/' undercloud.conf
+ sed -i 's/#network_cidr/network_cidr/' undercloud.conf
+ sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
+ sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
+ sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
+ sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
+
+ openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
+ openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
+ openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
+ openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
+ openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
+ openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
+ openstack-config --set undercloud.conf DEFAULT undercloud_debug false
+
+fi
+
+sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+
+# we assume that packages will not need to be updated with undercloud install
+# and that it will be used only to configure the undercloud
+# packages updates would need to be handled manually with yum update
+sudo cp -f /usr/share/diskimage-builder/elements/yum/bin/install-packages /usr/share/diskimage-builder/elements/yum/bin/install-packages.bak
+cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-packages > /dev/null
+#!/bin/sh
+exit 0
+EOF
+
+openstack undercloud install &> apex-undercloud-install.log || {
+  # cat the undercloud install log in case it fails
+ echo "ERROR: openstack undercloud install has failed. Dumping Log:"
+ cat apex-undercloud-install.log
+ exit 1
+}
+
+sleep 30
+sudo systemctl restart openstack-glance-api
+sudo systemctl restart openstack-nova-conductor
+sudo systemctl restart openstack-nova-compute
+
+sudo sed -i '/num_engine_workers/c\num_engine_workers = 2' /etc/heat/heat.conf
+sudo sed -i '/#workers\s=/c\workers = 2' /etc/heat/heat.conf
+sudo systemctl restart openstack-heat-engine
+sudo systemctl restart openstack-heat-api
+EOI
+
+# configure external network
+ ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" << EOI
+if [[ "$public_network_vlan" != "native" ]]; then
+ cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${public_network_vlan}
+DEVICE=vlan${public_network_vlan}
+ONBOOT=yes
+DEVICETYPE=ovs
+TYPE=OVSIntPort
+BOOTPROTO=static
+IPADDR=${public_network_provisioner_ip}
+PREFIX=${public_network_cidr##*/}
+OVS_BRIDGE=br-ctlplane
+OVS_OPTIONS="tag=${public_network_vlan}"
+EOF
+ ifup vlan${public_network_vlan}
+else
+ if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then
+ ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2
+ ip link set up dev eth2
+ fi
+fi
+EOI
+
+# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
+# TODO: revisit and file a bug if necessary. This should eventually be removed
+# as well as glance api problem
+echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
+sleep 15
+
+}
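Note: the sed and openstack-config calls above uncomment and then set the ctlplane parameters in undercloud.conf. With an assumed admin network of 192.0.2.0/24, the resulting section would look roughly like:

    [DEFAULT]
    local_ip = 192.0.2.1/24
    network_gateway = 192.0.2.1
    network_cidr = 192.0.2.0/24
    dhcp_start = 192.0.2.5
    dhcp_end = 192.0.2.24
    inspection_iprange = 192.0.2.100,192.0.2.120
    undercloud_debug = false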
diff --git a/lib/virtual-setup-functions.sh b/lib/virtual-setup-functions.sh
new file mode 100755
index 00000000..e7410da7
--- /dev/null
+++ b/lib/virtual-setup-functions.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+##############################################################################
+# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+##Create virtual nodes in virsh
+##params: vcpus, ramsize
+function setup_virtual_baremetal {
+ local vcpus ramsize
+ if [ -z "$1" ]; then
+ vcpus=4
+ ramsize=8192
+ elif [ -z "$2" ]; then
+ vcpus=$1
+ ramsize=8192
+ else
+ vcpus=$1
+ ramsize=$(($2*1024))
+ fi
+ #start by generating the opening json for instackenv.json
+ cat > $CONFIG/instackenv-virt.json << EOF
+{
+ "nodes": [
+EOF
+
+ # next create the virtual machines and add their definitions to the file
+ if [ "$ha_enabled" == "False" ]; then
+ # 1 controller + computes
+ # zero based so just pass compute count
+ vm_index=$VM_COMPUTES
+ else
+ # 3 controller + computes
+ # zero based so add 2 to compute count
+ vm_index=$((2+$VM_COMPUTES))
+ fi
+
+ for i in $(seq 0 $vm_index); do
+ if ! virsh list --all | grep baremetal${i} > /dev/null; then
+ define_vm baremetal${i} network 41 'admin_network' $vcpus $ramsize
+ for n in private_network public_network storage_network api_network; do
+ if [[ $enabled_network_list =~ $n ]]; then
+ echo -n "$n "
+ virsh attach-interface --domain baremetal${i} --type network --source $n --model virtio --config
+ fi
+ done
+ else
+ echo "Found Baremetal ${i} VM, using existing VM"
+ fi
+ #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
+ mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')
+
+ if [ "$VM_COMPUTES" -gt 0 ]; then
+ capability="profile:compute"
+ VM_COMPUTES=$((VM_COMPUTES - 1))
+ else
+ capability="profile:control"
+ fi
+
+ cat >> $CONFIG/instackenv-virt.json << EOF
+ {
+ "pm_addr": "192.168.122.1",
+ "pm_user": "root",
+ "pm_password": "INSERT_STACK_USER_PRIV_KEY",
+ "pm_type": "pxe_ssh",
+ "mac": [
+ "$mac"
+ ],
+ "cpu": "$vcpus",
+ "memory": "$ramsize",
+ "disk": "41",
+ "arch": "x86_64",
+ "capabilities": "$capability"
+ },
+EOF
+ done
+
+ #truncate the last line to remove the comma behind the bracket
+ tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{}
+
+ #finally reclose the bracket and close the instackenv.json file
+ cat >> $CONFIG/instackenv-virt.json << EOF
+ }
+ ],
+ "arch": "x86_64",
+ "host-ip": "192.168.122.1",
+ "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager",
+ "seed-ip": "",
+ "ssh-key": "INSERT_STACK_USER_PRIV_KEY",
+ "ssh-user": "root"
+}
+EOF
+  #Overwrite the tripleo-incubator domain.xml with our own, keeping a backup.
+ if [ ! -f /usr/share/tripleo/templates/domain.xml.bak ]; then
+ /usr/bin/mv -f /usr/share/tripleo/templates/domain.xml /usr/share/tripleo/templates/domain.xml.bak
+ fi
+
+ /usr/bin/cp -f $LIB/installer/domain.xml /usr/share/tripleo/templates/domain.xml
+}
+
+##Define a single virtual node in virsh
+##params: name - String: libvirt name for VM
+## bootdev - String: boot device for the VM
+## disksize - Number: size of the disk in GB
+## ovs_bridges: - List: list of ovs bridges
+## vcpus - Number of VCPUs to use (defaults to 4)
+## ramsize - Size of RAM for VM in MB (defaults to 8192)
+function define_vm () {
+ local vcpus ramsize
+
+ if [ -z "$5" ]; then
+ vcpus=4
+ ramsize=8388608
+ elif [ -z "$6" ]; then
+ vcpus=$5
+ ramsize=8388608
+ else
+ vcpus=$5
+ ramsize=$(($6*1024))
+ fi
+
+ # Create the libvirt storage volume
+ if virsh vol-list default | grep ${1}.qcow2 2>&1> /dev/null; then
+ volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2")
+ echo "Volume ${1} exists. Deleting Existing Volume $volume_path"
+ virsh vol-dumpxml ${1}.qcow2 --pool default > /dev/null || echo '' #ok for this to fail
+ touch $volume_path
+ virsh vol-delete ${1}.qcow2 --pool default
+ fi
+ virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2
+ volume_path=$(virsh vol-path --pool default ${1}.qcow2)
+ if [ ! -f $volume_path ]; then
+ echo "$volume_path Not created successfully... Aborting"
+ exit 1
+ fi
+
+ # create the VM
+ /usr/libexec/openstack-tripleo/configure-vm --name $1 \
+ --bootdev $2 \
+ --image "$volume_path" \
+ --diskbus sata \
+ --arch x86_64 \
+ --cpus $vcpus \
+ --memory $ramsize \
+ --libvirt-nic-driver virtio \
+ --baremetal-interface $4
+}
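Note: a sketch of how the deploy flow might drive these helpers for a virtual HA deployment with two computes; the counts and sizes are illustrative, not defaults taken from this change:

    VM_COMPUTES=2
    ha_enabled=True
    setup_virtual_baremetal 4 8      # 4 vCPUs, 8 GB RAM per node
    # which, per node, ends up calling:
    #   define_vm baremetal0 network 41 'admin_network' 4 8192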
diff --git a/tests/test_apex_deploy_env.py b/tests/test_apex_deploy_env.py
index 648923d0..563bfd8f 100644
--- a/tests/test_apex_deploy_env.py
+++ b/tests/test_apex_deploy_env.py
@@ -27,7 +27,8 @@ deploy_files = ('deploy_settings.yaml',
'os-nosdn-performance-ha.yaml',
'os-odl_l2-nofeature-ha.yaml',
'os-odl_l2-sfc-noha.yaml',
- 'os-onos-nofeature-ha.yaml')
+ 'os-onos-nofeature-ha.yaml',
+ 'os-onos-sfc-ha.yaml')
test_deploy_content = (
'global_params:',