-rwxr-xr-x [-rw-r--r--]  compass/deploy/func.sh  17
-rw-r--r--  foreman/ci/Vagrantfile  7
-rwxr-xr-x  foreman/ci/bootstrap.sh  5
-rwxr-xr-x  foreman/ci/clean.sh  81
-rwxr-xr-x  foreman/ci/deploy.sh  1335
-rw-r--r--  foreman/ci/inventory/lf_pod2_ksgen_settings.yml  36
-rw-r--r--  foreman/ci/opnfv_ksgen_settings.yml  5
-rw-r--r--  foreman/ci/opnfv_ksgen_settings_no_HA.yml  264
-rw-r--r--  foreman/ci/reload_playbook.yml  1
-rwxr-xr-x  foreman/ci/vm_nodes_provision.sh  50
-rw-r--r--  foreman/docs/src/installation-instructions.rst  16
-rw-r--r--  fuel/deploy/libvirt/vms/compute.xml  1
-rw-r--r--  fuel/deploy/libvirt/vms/controller.xml  1
-rw-r--r--  fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4  1
-rw-r--r--  fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5  1
-rw-r--r--  fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1  1
16 files changed, 1305 insertions, 517 deletions
diff --git a/compass/deploy/func.sh b/compass/deploy/func.sh
index 29c2c23..49ea947 100644..100755
--- a/compass/deploy/func.sh
+++ b/compass/deploy/func.sh
@@ -1,19 +1,22 @@
function tear_down_machines() {
- virtmachines=$(virsh list --name |grep pxe)
+ virtmachines=$(sudo virsh list --name |grep pxe)
for virtmachine in $virtmachines; do
echo "destroy $virtmachine"
- virsh destroy $virtmachine
+ sudo virsh destroy $virtmachine
if [[ "$?" != "0" ]]; then
echo "destroy instance $virtmachine failed"
exit 1
fi
done
- virtmachines=$(virsh list --all --name |grep pxe)
- for virtmachine in $virtmachines; do
- echo "undefine $virtmachine"
- virsh undefine $virtmachine
+
+ sudo virsh list --all|grep shut|awk '{print $2}'|xargs -n 1 sudo virsh undefine
+
+ vol_names=$(sudo virsh vol-list default |grep .img | awk '{print $1}')
+ for vol_name in $vol_names; do
+ echo "virsh vol-delete $vol_name"
+ sudo virsh vol-delete $vol_name --pool default
if [[ "$?" != "0" ]]; then
- echo "undefine instance $virtmachine failed"
+ echo "vol-delete $vol_name failed!"
exit 1
fi
done
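For reference, the revised teardown can be reproduced by hand with the same virsh calls; a minimal sketch, assuming the libvirt "default" storage pool and pxe-named domains used by this script:

    # destroy any running pxe domains
    for vm in $(sudo virsh list --name | grep pxe); do
        sudo virsh destroy "$vm"
    done
    # undefine whatever is left shut off, then delete leftover images from the default pool
    sudo virsh list --all | grep "shut off" | awk '{print $2}' | xargs -r -n 1 sudo virsh undefine
    for vol in $(sudo virsh vol-list default | grep .img | awk '{print $1}'); do
        sudo virsh vol-delete "$vol" --pool default
    done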
diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile
index 100e12d..a01da70 100644
--- a/foreman/ci/Vagrantfile
+++ b/foreman/ci/Vagrantfile
@@ -41,6 +41,9 @@ Vagrant.configure(2) do |config|
default_gw = ""
nat_flag = false
+ # Disable dhcp flag
+ disable_dhcp_flag = false
+
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
@@ -90,4 +93,8 @@ Vagrant.configure(2) do |config|
config.vm.provision :shell, path: "nat_setup.sh"
end
config.vm.provision :shell, path: "bootstrap.sh"
+ if disable_dhcp_flag
+ config.vm.provision :shell, :inline => "systemctl stop dhcpd"
+ config.vm.provision :shell, :inline => "systemctl disable dhcpd"
+ end
end
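The new disable_dhcp_flag stays false by default; deploy.sh is expected to flip it with sed before vagrant up when a virtual deployment runs without DHCP (see the configure_network hunk in deploy.sh below). A sketch of that toggle, run from the directory holding the Vagrantfile:

    sed -i 's/^.*disable_dhcp_flag =.*$/  disable_dhcp_flag = true/' Vagrantfile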
diff --git a/foreman/ci/bootstrap.sh b/foreman/ci/bootstrap.sh
index 4bc22ed..c98f00e 100755
--- a/foreman/ci/bootstrap.sh
+++ b/foreman/ci/bootstrap.sh
@@ -25,8 +25,7 @@ green=`tput setaf 2`
yum install -y epel-release-7*
# Install other required packages
-# Major version is pinned to force some consistency for Arno
-if ! yum -y install python-pip-1* python-virtualenv-1* gcc-4* git-1* sshpass-1* ansible-1* python-requests-1*; then
+if ! yum -y install python-pip python-virtualenv gcc git sshpass ansible python-requests; then
printf '%s\n' 'bootstrap.sh: failed to install required packages' >&2
exit 1
fi
@@ -36,7 +35,7 @@ cd /opt
echo "Cloning khaleesi to /opt"
if [ ! -d khaleesi ]; then
- if ! git clone -b v1.0 https://github.com/trozet/khaleesi.git; then
+ if ! git clone -b opnfv https://github.com/trozet/khaleesi.git; then
printf '%s\n' 'bootstrap.sh: Unable to git clone khaleesi' >&2
exit 1
fi
diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh
index f61ac93..1a16efd 100755
--- a/foreman/ci/clean.sh
+++ b/foreman/ci/clean.sh
@@ -5,7 +5,7 @@
#
#Uses Vagrant and VirtualBox
#
-#Destroys Vagrant VM running in /tmp/bgs_vagrant
+#Destroys Vagrant VMs running in $vm_dir/
#Shuts down all nodes found in Khaleesi settings
#Removes hypervisor kernel modules (VirtualBox)
@@ -14,6 +14,8 @@ reset=`tput sgr0`
blue=`tput setaf 4`
red=`tput setaf 1`
green=`tput setaf 2`
+
+vm_dir=/var/opt/opnfv
##END VARS
##FUNCTIONS
@@ -85,7 +87,7 @@ node_counter=0
output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
for line in ${output} ; do
bmc_pass[$node_counter]=$line
- ((node_counter++))
+ ((node_counter++))
done
for mynode in `seq 0 $max_nodes`; do
@@ -106,35 +108,78 @@ else
skip_vagrant=1
fi
+###legacy VM location check
+###remove me later
+if [ -d /tmp/bgs_vagrant ]; then
+ cd /tmp/bgs_vagrant
+ vagrant destroy -f
+ rm -rf /tmp/bgs_vagrant
+fi
+
###destroy vagrant
if [ $skip_vagrant -eq 0 ]; then
- cd /tmp/bgs_vagrant
- if vagrant destroy -f; then
- echo "${blue}Successfully destroyed Foreman VM ${reset}"
+ if [ -d $vm_dir ]; then
+ ##all vm directories
+ for vm in $( ls $vm_dir ); do
+ cd $vm_dir/$vm
+ if vagrant destroy -f; then
+ echo "${blue}Successfully destroyed $vm Vagrant VM ${reset}"
+ else
+ echo "${red}Unable to destroy $vm Vagrant VM! Attempting to killall vagrant if process is hung ${reset}"
+ killall vagrant
+ echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
+ if ps axf | grep vagrant; then
+ echo "${red}Vagrant process still exists after kill...exiting ${reset}"
+ exit 1
+ else
+ echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}"
+ fi
+ fi
+
+ ##Vagrant boxes appear as VboxHeadless processes
+ ##try to gracefully destroy the VBox VM if it still exists
+ if vboxmanage list runningvms | grep $vm; then
+ echo "${red} $vm VBoxHeadless process still exists...Removing${reset}"
+ vbox_id=$(vboxmanage list runningvms | grep $vm | awk '{print $1}' | sed 's/"//g')
+ vboxmanage controlvm $vbox_id poweroff
+ if vboxmanage unregistervm --delete $vbox_id; then
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ else
+ echo "${red} Unable to delete VM $vm ...Exiting ${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ fi
+ done
else
- echo "${red}Unable to destroy Foreman VM ${reset}"
- echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
- if ps axf | grep vagrant; then
- echo "${red}Vagrant VM still exists...exiting ${reset}"
- exit 1
- else
- echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}"
- fi
+ echo "${blue}${vm_dir} doesn't exist, no VMs in OPNFV directory to destroy! ${reset}"
fi
+ echo "${blue}Checking for any remaining virtual box processes...${reset}"
###kill virtualbox
- echo "${blue}Killing VirtualBox ${reset}"
- killall virtualbox
- killall VBoxHeadless
+ if ps axf | grep virtualbox; then
+ echo "${blue}virtualbox processes are still running. Killing any remaining VirtualBox processes...${reset}"
+ killall virtualbox
+ fi
+
+ ###kill any leftover VMs (brute force)
+ if ps axf | grep VBoxHeadless; then
+ echo "${blue}VBoxHeadless processes are still running. Killing any remaining VBoxHeadless processes...${reset}"
+ killall VBoxHeadless
+ fi
###remove virtualbox
- echo "${blue}Removing VirtualBox ${reset}"
+ echo "${blue}Removing VirtualBox... ${reset}"
yum -y remove $vboxpkg
else
- echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}"
+ echo "${blue}Skipping Vagrant destroy + VBox Removal as VirtualBox package is already removed ${reset}"
fi
+###remove working vm directory
+echo "${blue}Removing working VM directory: $vm_dir ${reset}"
+rm -rf $vm_dir
###remove kernel modules
echo "${blue}Removing kernel modules ${reset}"
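The graceful VirtualBox teardown added above can also be run on its own against a single stuck VM; a sketch, assuming $vm holds the VM name as in the loop above:

    if vboxmanage list runningvms | grep "$vm"; then
        vbox_id=$(vboxmanage list runningvms | grep "$vm" | awk '{print $1}' | sed 's/"//g')
        vboxmanage controlvm "$vbox_id" poweroff
        vboxmanage unregistervm --delete "$vbox_id"
    fi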
diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh
index 31d41d2..a05b3de 100755
--- a/foreman/ci/deploy.sh
+++ b/foreman/ci/deploy.sh
@@ -25,6 +25,11 @@ red=`tput setaf 1`
green=`tput setaf 2`
declare -A interface_arr
+declare -A controllers_ip_arr
+declare -A admin_ip_arr
+declare -A public_ip_arr
+
+vm_dir=/var/opt/opnfv
##END VARS
##FUNCTIONS
@@ -35,6 +40,28 @@ display_usage() {
echo -e "\n -no_parse : No variable parsing into config. Flag. \n"
echo -e "\n -base_config : Full path of settings file to parse. Optional. Will provide a new base settings file rather than the default. Example: -base_config /opt/myinventory.yml \n"
echo -e "\n -virtual : Node virtualization instead of baremetal. Flag. \n"
+ echo -e "\n -enable_virtual_dhcp : Run dhcp server instead of using static IPs. Use this with -virtual only. \n"
+ echo -e "\n -static_ip_range : static IP range to define when using virtual and when dhcp is not being used (default); must be at least a 20 IP block. Format: '192.168.1.1,192.168.1.20' \n"
+ echo -e "\n -ping_site : site to use to verify IP connectivity from the VM when -virtual is used. Format: -ping_site www.blah.com \n"
+ echo -e "\n -floating_ip_count : number of IP addresses from the public range to be used for floating IPs. Default is 20.\n"
+}
+
+##verify vm dir exists
+##params: none
+function verify_vm_dir {
+ if [ -d "$vm_dir" ]; then
+ echo -e "\n\n${red}ERROR: VM Directory: $vm_dir already exists. Environment not clean. Please use clean.sh. Exiting${reset}\n\n"
+ exit 1
+ else
+ mkdir -p $vm_dir
+ fi
+
+ chmod 700 $vm_dir
+
+ if [ ! -d $vm_dir ]; then
+ echo -e "\n\n${red}ERROR: Unable to create VM Directory: $vm_dir Exiting${reset}\n\n"
+ exit -1
+ fi
}
##find ip of interface
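Putting the new options together with the usage text above, a virtual deployment without DHCP might be invoked like this (illustrative values only):

    ./deploy.sh -virtual -static_ip_range '192.168.1.100,192.168.1.120' \
                -ping_site www.google.com -floating_ip_count 20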
@@ -51,6 +78,41 @@ function find_subnet {
printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
}
+##verify subnet has at least n IPs
+##params: subnet mask, n IPs
+function verify_subnet_size {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ num_ips_required=$2
+
+ ##this function assumes you would never need more than 254
+ ##we check here to make sure
+ if [ "$num_ips_required" -ge 254 ]; then
+ echo -e "\n\n${red}ERROR: allocating more than 254 IPs is unsupported...Exiting${reset}\n\n"
+ return 1
+ fi
+
+ ##we just return if 3rd octet is not 255
+ ##because we know the subnet is big enough
+ if [ "$i3" -ne 255 ]; then
+ return 0
+ elif [ $((254-$i4)) -ge "$num_ips_required" ]; then
+ return 0
+ else
+ echo -e "\n\n${red}ERROR: Subnet is too small${reset}\n\n"
+ return 1
+ fi
+}
+
+##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask
+## Warning: This function only works for IPv4 at the moment.
+##params: ip, netmask
+function find_last_ip_subnet {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ IFS=. read -r m1 m2 m3 m4 <<< "$2"
+ IFS=. read -r s1 s2 s3 s4 <<< "$((i1 & m1)).$((i2 & m2)).$((i3 & m3)).$((i4 & m4))"
+ printf "%d.%d.%d.%d\n" "$((255 - $m1 + $s1))" "$((255 - $m2 + $s2))" "$((255 - $m3 + $s3))" "$((255 - $m4 + $s4 - 1))"
+}
+
##increments subnet by a value
##params: ip, value
##assumes low value
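A quick check of the helpers above, assuming a /24 public network: find_last_ip_subnet returns the broadcast address minus one, and verify_subnet_size passes because 254 minus the last mask octet (0) leaves well over 25 addresses:

    find_last_ip_subnet 192.168.1.10 255.255.255.0    # prints 192.168.1.254
    verify_subnet_size 255.255.255.0 25 && echo "public subnet is large enough"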
@@ -87,6 +149,19 @@ function next_ip {
echo $baseaddr.$lsv
}
+##subtracts a value from an IP address
+##params: last ip, ip_count
+##assumes ip_count is less than the last octet of the address
+subtract_ip() {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ ip_count=$2
+ if [ $i4 -lt $ip_count ]; then
+ echo -e "\n\n${red}ERROR: Can't subtract $ip_count from IP address $1 Exiting${reset}\n\n"
+ exit 1
+ fi
+ printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 - $ip_count ))"
+}
+
##removes the network interface config from Vagrantfile
##params: interface
##assumes you are in the directory of Vagrantfile
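subtract_ip is used further down to carve the Neutron floating range out of the top of the public subnet (public_allocation_start is the last usable IP minus floating_ip_count). A small worked example, assuming a /24 and the default count of 20:

    last_ip=$(find_last_ip_subnet 192.168.1.10 255.255.255.0)   # 192.168.1.254
    subtract_ip "$last_ip" 20                                   # prints 192.168.1.234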
@@ -149,19 +224,21 @@ parse_yaml() {
}'
}
-##END FUNCTIONS
-
-if [[ ( $1 == "--help") || $1 == "-h" ]]; then
+##translates the command line parameters into variables
+##params: $@ the entire command line is passed
+##usage: parse_cmdline "$@"
+parse_cmdline() {
+ if [[ ( $1 == "--help") || $1 == "-h" ]]; then
display_usage
exit 0
-fi
+ fi
-echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
-echo "Use -h to display help"
-sleep 2
+ echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
+ echo "Use -h to display help"
+ sleep 2
-while [ "`echo $1 | cut -c1`" = "-" ]
-do
+ while [ "`echo $1 | cut -c1`" = "-" ]
+ do
echo $1
case "$1" in
-base_config)
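The option-parsing body is unchanged inside the new parse_cmdline wrapper; the rest of the script is likewise split into functions below (disable_selinux, install_EPEL, install_vbox, install_ansible, install_vagrant, clean_tmp, clone_bgs, configure_network, configure_virtual, start_foreman, start_virtual_nodes). The top-level call sequence is not part of this excerpt, but the ##usage comments suggest a main flow roughly like:

    # presumed orchestration; the actual call order is outside this diff excerpt
    parse_cmdline "$@"
    disable_selinux
    install_EPEL
    install_vbox
    install_ansible
    install_vagrant
    clean_tmp
    clone_bgs
    configure_network
    configure_virtual
    start_foreman
    start_virtual_nodes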
@@ -176,35 +253,84 @@ do
virtual="TRUE"
shift 1
;;
+ -enable_virtual_dhcp)
+ enable_virtual_dhcp="TRUE"
+ shift 1
+ ;;
+ -static_ip_range)
+ static_ip_range=$2
+ shift 2
+ ;;
+ -ping_site)
+ ping_site=$2
+ shift 2
+ ;;
+ -floating_ip_count)
+ floating_ip_count=$2
+ shift 2
+ ;;
*)
display_usage
exit 1
;;
-esac
-done
+ esac
+ done
+
+ if [ ! -z "$enable_virtual_dhcp" ] && [ ! -z "$static_ip_range" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. Static IP range cannot be set when using DHCP!. Exiting${reset}\n\n"
+ exit 1
+ fi
+
+ if [ -z "$virtual" ]; then
+ if [ ! -z "$enable_virtual_dhcp" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. enable_virtual_dhcp can only be set when using -virtual!. Exiting${reset}\n\n"
+ exit 1
+ elif [ ! -z "$static_ip_range" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. static_ip_range can only be set when using -virtual!. Exiting${reset}\n\n"
+ exit 1
+ fi
+ fi
+
+ if [ -z "$floating_ip_count" ]; then
+ floating_ip_count=20
+ fi
+}
##disable selinux
-/sbin/setenforce 0
-
-# Install EPEL repo for access to many other yum repos
-# Major version is pinned to force some consistency for Arno
-yum install -y epel-release-7*
-
-# Install other required packages
-# Major versions are pinned to force some consistency for Arno
-if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
- printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2
- exit 1
-fi
-
-##install VirtualBox repo
-if cat /etc/*release | grep -i "Fedora release"; then
- vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch
-else
- vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch
-fi
-
-cat > /etc/yum.repos.d/virtualbox.repo << EOM
+##params: none
+##usage: disable_selinux()
+disable_selinux() {
+ /sbin/setenforce 0
+}
+
+##Install the EPEL repository and additional packages
+##params: none
+##usage: install_EPEL()
+install_EPEL() {
+ # Install EPEL repo for access to many other yum repos
+ # Major version is pinned to force some consistency for Arno
+ yum install -y epel-release-7*
+
+ # Install other required packages
+ # Major versions are pinned to force some consistency for Arno
+ if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
+ printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2
+ exit 1
+ fi
+}
+
+##Download and install virtual box
+##params: none
+##usage: install_vbox()
+install_vbox() {
+ ##install VirtualBox repo
+ if cat /etc/*release | grep -i "Fedora release"; then
+ vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch
+ else
+ vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch
+ fi
+
+ cat > /etc/yum.repos.d/virtualbox.repo << EOM
[virtualbox]
name=Oracle Linux / RHEL / CentOS-\$releasever / \$basearch - VirtualBox
baseurl=$vboxurl
@@ -215,365 +341,307 @@ skip_if_unavailable = 1
keepcache = 0
EOM
-##install VirtualBox
-if ! yum list installed | grep -i virtualbox; then
- if ! yum -y install VirtualBox-4.3; then
- printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2
- exit 1
+ ##install VirtualBox
+ if ! yum list installed | grep -i virtualbox; then
+ if ! yum -y install VirtualBox-4.3; then
+ printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2
+ exit 1
+ fi
fi
-fi
-##install kmod-VirtualBox
-if ! lsmod | grep vboxdrv; then
- sudo /etc/init.d/vboxdrv setup
+ ##install kmod-VirtualBox
if ! lsmod | grep vboxdrv; then
- printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
- exit 1
+ sudo /etc/init.d/vboxdrv setup
+ if ! lsmod | grep vboxdrv; then
+ printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed'
-fi
-
-##install Ansible
-if ! yum list installed | grep -i ansible; then
- if ! yum -y install ansible-1*; then
- printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
- exit 1
+}
+
+##install Ansible using yum
+##params: none
+##usage: install_ansible()
+install_ansible() {
+ if ! yum list installed | grep -i ansible; then
+ if ! yum -y install ansible-1*; then
+ printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
+ exit 1
+ fi
fi
-fi
+}
-##install Vagrant
-if ! rpm -qa | grep vagrant; then
- if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
- printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2
- exit 1
+##install Vagrant RPM directly with the bintray.com site
+##params: none
+##usage: install_vagrant()
+install_vagrant() {
+ if ! rpm -qa | grep vagrant; then
+ if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
+ printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.'
-fi
-
-##add centos 7 box to vagrant
-if ! vagrant box list | grep chef/centos-7.0; then
- if ! vagrant box add chef/centos-7.0 --provider virtualbox; then
- printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2
- exit 1
+
+ ##add centos 7 box to vagrant
+ if ! vagrant box list | grep chef/centos-7.0; then
+ if ! vagrant box add chef/centos-7.0 --provider virtualbox; then
+ printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.'
-fi
-
-##install workaround for centos7
-if ! vagrant plugin list | grep vagrant-centos7_fix; then
- if ! vagrant plugin install vagrant-centos7_fix; then
- printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2
+
+ ##install workaround for centos7
+ if ! vagrant plugin list | grep vagrant-centos7_fix; then
+ if ! vagrant plugin install vagrant-centos7_fix; then
+ printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.'
-fi
+}
-cd /tmp/
##remove bgs vagrant incase it wasn't cleaned up
-rm -rf /tmp/bgs_vagrant
-
-##clone bgs vagrant
-##will change this to be opnfv repo when commit is done
-if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then
- printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
- exit 1
-fi
-
-cd bgs_vagrant
-
-echo "${blue}Detecting network configuration...${reset}"
-##detect host 1 or 3 interface configuration
-#output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
-output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
-
-if [ ! "$output" ]; then
- printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
- exit 1
-fi
+##params: none
+##usage: clean_tmp()
+clean_tmp() {
+ rm -rf $vm_dir/foreman_vm
+}
-##find number of interfaces with ip and substitute in VagrantFile
-if_counter=0
-for interface in ${output}; do
+##clone genesis and move to node vm dir
+##params: none
+##usage: clone_bgs
+clone_bgs() {
+ cd /tmp/
+ rm -rf /tmp/genesis/
- if [ "$if_counter" -ge 4 ]; then
- break
- fi
- interface_ip=$(find_ip $interface)
- if [ ! "$interface_ip" ]; then
- continue
- fi
- new_ip=$(next_usable_ip $interface_ip)
- if [ ! "$new_ip" ]; then
- continue
- fi
- interface_arr[$interface]=$if_counter
- interface_ip_arr[$if_counter]=$new_ip
- subnet_mask=$(find_netmask $interface)
- if [ "$if_counter" -eq 1 ]; then
- private_subnet_mask=$subnet_mask
- private_short_subnet_mask=$(find_short_netmask $interface)
- fi
- if [ "$if_counter" -eq 2 ]; then
- public_subnet_mask=$subnet_mask
- public_short_subnet_mask=$(find_short_netmask $interface)
- fi
- if [ "$if_counter" -eq 3 ]; then
- storage_subnet_mask=$subnet_mask
- fi
- sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
- ((if_counter++))
-done
-
-##now remove interface config in Vagrantfile for 1 node
-##if 1, 3, or 4 interfaces set deployment type
-##if 2 interfaces remove 2nd interface and set deployment type
-if [ "$if_counter" == 1 ]; then
- deployment_type="single_network"
- remove_vagrant_network eth_replace1
- remove_vagrant_network eth_replace2
- remove_vagrant_network eth_replace3
-elif [ "$if_counter" == 2 ]; then
- deployment_type="single_network"
- second_interface=`echo $output | awk '{print $2}'`
- remove_vagrant_network $second_interface
- remove_vagrant_network eth_replace2
-elif [ "$if_counter" == 3 ]; then
- deployment_type="three_network"
- remove_vagrant_network eth_replace3
-else
- deployment_type="multi_network"
-fi
-
-echo "${blue}Network detected: ${deployment_type}! ${reset}"
-
-if route | grep default; then
- echo "${blue}Default Gateway Detected ${reset}"
- host_default_gw=$(ip route | grep default | awk '{print $3}')
- echo "${blue}Default Gateway: $host_default_gw ${reset}"
- default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
- case "${interface_arr[$default_gw_interface]}" in
- 0)
- echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
- node_default_gw=$host_default_gw
- ;;
- 1)
- echo "${red}Default Gateway Detected on Private Interface!${reset}"
- echo "${red}Private subnet should be private and not have Internet access!${reset}"
- exit 1
- ;;
- 2)
- echo "${blue}Default Gateway Detected on Public Interface!${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
- echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
- sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile
- echo "${blue}Setting node gateway to be VM Admin IP${reset}"
- node_default_gw=${interface_ip_arr[0]}
- public_gateway=$default_gw
- ;;
- 3)
- echo "${red}Default Gateway Detected on Storage Interface!${reset}"
- echo "${red}Storage subnet should be private and not have Internet access!${reset}"
- exit 1
- ;;
- *)
- echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}"
- exit 1
- ;;
- esac
-else
- #assumes 24 bit mask
- defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
- firstip=.1
- defaultgw=$defaultgw$firstip
- echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile
- node_default_gw=$defaultgw
-fi
-
-if [ $base_config ]; then
- if ! cp -f $base_config opnfv_ksgen_settings.yml; then
- echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+ ##clone artifacts and move into foreman_vm dir
+ if ! GIT_SSL_NO_VERIFY=true git clone https://gerrit.opnfv.org/gerrit/genesis.git; then
+ printf '%s\n' 'deploy.sh: Unable to clone genesis repo' >&2
exit 1
fi
-fi
-
-if [ $no_parse ]; then
-echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}"
-else
-
-echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}"
-##Edit the ksgen settings appropriately
-##ksgen settings will be stored in /vagrant on the vagrant machine
-##if single node deployment all the variables will have the same ip
-##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7
-
-sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
+ mv -f /tmp/genesis/foreman/ci $vm_dir/foreman_vm
+ rm -rf /tmp/genesis/
+}
-##replace private interface parameter
-##private interface will be of hosts, so we need to know the provisioned host interface name
-##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts
-##replace IP for parameters with next IP that will be given to controller
-if [ "$deployment_type" == "single_network" ]; then
- ##we also need to assign IP addresses to nodes
- ##for single node, foreman is managing the single network, so we can't reserve them
- ##not supporting single network anymore for now
- echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. ${reset}"
- exit 0
+##validates the network settings and update VagrantFile with network settings
+##params: none
+##usage: configure_network()
+configure_network() {
+ cd $vm_dir/foreman_vm
-elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then
+ echo "${blue}Detecting network configuration...${reset}"
+ ##detect host 1 or 3 interface configuration
+ #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
+ output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
- if [ "$deployment_type" == "three_network" ]; then
- sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml
+ if [ ! "$output" ]; then
+ printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
+ exit 1
fi
- sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml
+ ##for virtual we only find 1 interface
+ if [ $virtual ]; then
+ ##find interface with default gateway
+ this_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $this_default_gw ${reset}"
+ this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}')
- ##get ip addresses for private network on controllers to make dhcp entries
- ##required for controllers_ip_array global param
- next_private_ip=${interface_ip_arr[1]}
- type=_private
- for node in controller1 controller2 controller3; do
- next_private_ip=$(next_usable_ip $next_private_ip)
- if [ ! "$next_private_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
- exit 1
+ ##find interface IP, make sure its valid
+ interface_ip=$(find_ip $this_default_gw_interface)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Interface ${this_default_gw_interface} does not have an IP: $interface_ip ! Exiting ${reset}"
+ exit 1
fi
- sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
- controller_ip_array=$controller_ip_array$next_private_ip,
- done
- ##replace global param for contollers_ip_array
- controller_ip_array=${controller_ip_array%?}
- sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
-
- ##now replace all the VIP variables. admin//private can be the same IP
- ##we have to use IP's here that won't be allocated to hosts at provisioning time
- ##therefore we increment the ip by 10 to make sure we have a safe buffer
- next_private_ip=$(increment_ip $next_private_ip 10)
+ ##set variable info
+ if [ ! -z "$static_ip_range" ]; then
+ new_ip=$(echo $static_ip_range | cut -d , -f1)
+ else
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ echo "${red} Cannot find next IP on interface ${this_default_gw_interface} new_ip: $new_ip ! Exiting ${reset}"
+ exit 1
+ fi
+ fi
+ interface=$this_default_gw_interface
+ public_interface=$interface
+ interface_arr[$interface]=2
+ interface_ip_arr[2]=$new_ip
+ subnet_mask=$(find_netmask $interface)
+ public_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $interface)
- grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do
- sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
- next_private_ip=$(next_usable_ip $next_private_ip)
- if [ ! "$next_private_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
- exit 1
+ if ! verify_subnet_size $public_subnet_mask 25; then
+ echo "${red} Not enough IPs in public subnet: $interface_ip_arr[2] ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
fi
- done
- ##replace foreman site
- next_public_ip=${interface_ip_arr[2]}
- sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
- ##replace public vips
- next_public_ip=$(increment_ip $next_public_ip 10)
- grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do
- sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
- next_public_ip=$(next_usable_ip $next_public_ip)
- if [ ! "$next_public_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2
+ ##set that interface to be public
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ if_counter=1
+ else
+ ##find number of interfaces with ip and substitute in VagrantFile
+ if_counter=0
+ for interface in ${output}; do
+
+ if [ "$if_counter" -ge 4 ]; then
+ break
+ fi
+ interface_ip=$(find_ip $interface)
+ if [ ! "$interface_ip" ]; then
+ continue
+ fi
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ continue
+ fi
+ interface_arr[$interface]=$if_counter
+ interface_ip_arr[$if_counter]=$new_ip
+ subnet_mask=$(find_netmask $interface)
+ if [ "$if_counter" -eq 0 ]; then
+ admin_subnet_mask=$subnet_mask
+ if ! verify_subnet_size $admin_subnet_mask 5; then
+ echo "${red} Not enough IPs in admin subnet: ${interface_ip_arr[$if_counter]} ${admin_subnet_mask}. Need at least 5 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+
+ elif [ "$if_counter" -eq 1 ]; then
+ private_subnet_mask=$subnet_mask
+ private_short_subnet_mask=$(find_short_netmask $interface)
+
+ if ! verify_subnet_size $private_subnet_mask 15; then
+ echo "${red} Not enough IPs in private subnet: ${interface_ip_arr[$if_counter]} ${private_subnet_mask}. Need at least 15 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ elif [ "$if_counter" -eq 2 ]; then
+ public_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $interface)
+
+ if ! verify_subnet_size $public_subnet_mask 25; then
+ echo "${red} Not enough IPs in public subnet: ${interface_ip_arr[$if_counter]} ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ elif [ "$if_counter" -eq 3 ]; then
+ storage_subnet_mask=$subnet_mask
+
+ if ! verify_subnet_size $storage_subnet_mask 10; then
+ echo "${red} Not enough IPs in storage subnet: ${interface_ip_arr[$if_counter]} ${storage_subnet_mask}. Need at least 10 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ else
+ echo "${red}ERROR: interface counter outside valid range of 0 to 3: $if_counter ! ${reset}"
+ exit 1
+ fi
+ sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ ((if_counter++))
+ done
+ fi
+ ##now remove interface config in Vagrantfile for 1 node
+ ##if 1, 3, or 4 interfaces set deployment type
+ ##if 2 interfaces remove 2nd interface and set deployment type
+ if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then
+ if [ $virtual ]; then
+ deployment_type="single_network"
+ echo "${blue}Single network detected for Virtual deployment...converting to three_network with internal networks! ${reset}"
+ private_internal_ip=155.1.2.2
+ admin_internal_ip=156.1.2.2
+ private_subnet_mask=255.255.255.0
+ private_short_subnet_mask=/24
+ interface_ip_arr[1]=$private_internal_ip
+ interface_ip_arr[0]=$admin_internal_ip
+ admin_subnet_mask=255.255.255.0
+ admin_short_subnet_mask=/24
+ sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", ip: '\""$private_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$admin_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ remove_vagrant_network eth_replace3
+ deployment_type=three_network
+ else
+ echo "${blue}Single network or 2 network detected for baremetal deployment. This is unsupported! Exiting. ${reset}"
exit 1
fi
- done
-
- ##replace public_network param
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
- ##replace private_network param
- private_subnet=$(find_subnet $next_private_ip $private_subnet_mask)
- sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
- ##replace storage_network
- if [ "$deployment_type" == "three_network" ]; then
- sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ elif [ "$if_counter" == 3 ]; then
+ deployment_type="three_network"
+ remove_vagrant_network eth_replace3
else
- next_storage_ip=${interface_ip_arr[3]}
- storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask)
- sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml
+ deployment_type="multi_network"
fi
- ##replace public_subnet param
- public_subnet=$public_subnet'\'$public_short_subnet_mask
- sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
- ##replace private_subnet param
- private_subnet=$private_subnet'\'$private_short_subnet_mask
- sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml
-
- ##replace public_dns param to be foreman server
- sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml
-
- ##replace public_gateway
- if [ -z "$public_gateway" ]; then
- ##if unset then we assume its the first IP in the public subnet
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- public_gateway=$(increment_subnet $public_subnet 1)
- fi
- sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml
-
- ##we have to define an allocation range of the public subnet to give
- ##to neutron to use as floating IPs
- ##we should control this subnet, so this range should work .150-200
- ##but generally this is a bad idea and we are assuming at least a /24 subnet here
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- public_allocation_start=$(increment_subnet $public_subnet 150)
- public_allocation_end=$(increment_subnet $public_subnet 200)
-
- sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
- sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
-
-else
- printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2
- exit 1
-fi
-
-echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}"
-
-fi
-
-if [ $virtual ]; then
- echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
- sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
-fi
-
-echo "${blue}Starting Vagrant! ${reset}"
-
-##stand up vagrant
-if ! vagrant up; then
- printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2
- exit 1
-else
- echo "${blue}Foreman VM is up! ${reset}"
-fi
-
-if [ $virtual ]; then
-
-##Bring up VM nodes
-echo "${blue}Setting VMs up... ${reset}"
-nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
-##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first
-##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have
-##3 static controllers
-compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
-controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
-nodes=${controller_nodes}${compute_nodes}
-
-for node in ${nodes}; do
- cd /tmp
-
- ##remove VM nodes incase it wasn't cleaned up
- rm -rf /tmp/$node
-
- ##clone bgs vagrant
- ##will change this to be opnfv repo when commit is done
- if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then
- printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
- exit 1
+ echo "${blue}Network detected: ${deployment_type}! ${reset}"
+
+ if [ $virtual ]; then
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*disable_dhcp_flag =.*$/ disable_dhcp_flag = true/' Vagrantfile
+ if [ $static_ip_range ]; then
+ ##verify static range is at least 20 IPs
+ static_ip_range_begin=$(echo $static_ip_range | cut -d , -f1)
+ static_ip_range_end=$(echo $static_ip_range | cut -d , -f2)
+ ##verify range is at least 20 ips
+ ##assumes less than 255 range pool
+ begin_octet=$(echo $static_ip_range_begin | cut -d . -f4)
+ end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+ ip_count=$((end_octet-begin_octet+1))
+ if [ "$ip_count" -lt 20 ]; then
+ echo "${red}Static range is less than 20 ips: ${ip_count}, exiting ${reset}"
+ exit 1
+ else
+ echo "${blue}Static IP range is size $ip_count ${reset}"
+ fi
+ fi
+ fi
fi
- cd $node
+ if route | grep default; then
+ echo "${blue}Default Gateway Detected ${reset}"
+ host_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $host_default_gw ${reset}"
+ default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
+ case "${interface_arr[$default_gw_interface]}" in
+ 0)
+ echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+ node_default_gw=$host_default_gw
+ ;;
+ 1)
+ echo "${red}Default Gateway Detected on Private Interface!${reset}"
+ echo "${red}Private subnet should be private and not have Internet access!${reset}"
+ exit 1
+ ;;
+ 2)
+ echo "${blue}Default Gateway Detected on Public Interface!${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+ echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
+ sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile
+ echo "${blue}Setting node gateway to be VM Admin IP${reset}"
+ node_default_gw=${interface_ip_arr[0]}
+ public_gateway=$default_gw
+ ;;
+ 3)
+ echo "${red}Default Gateway Detected on Storage Interface!${reset}"
+ echo "${red}Storage subnet should be private and not have Internet access!${reset}"
+ exit 1
+ ;;
+ *)
+ echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}"
+ exit 1
+ ;;
+ esac
+ else
+ #assumes 24 bit mask
+ defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
+ firstip=.1
+ defaultgw=$defaultgw$firstip
+ echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile
+ node_default_gw=$defaultgw
+ fi
if [ $base_config ]; then
if ! cp -f $base_config opnfv_ksgen_settings.yml; then
@@ -582,114 +650,479 @@ for node in ${nodes}; do
fi
fi
- ##parse yaml into variables
- eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
- ##find node type
- node_type=config_nodes_${node}_type
- node_type=$(eval echo \$$node_type)
-
- ##find number of interfaces with ip and substitute in VagrantFile
- output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
-
- if [ ! "$output" ]; then
- printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
- exit 1
- fi
-
-
- if_counter=0
- for interface in ${output}; do
+ if [ $no_parse ]; then
+ echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}"
- if [ "$if_counter" -ge 4 ]; then
- break
- fi
- interface_ip=$(find_ip $interface)
- if [ ! "$interface_ip" ]; then
- continue
- fi
- case "${if_counter}" in
- 0)
- mac_string=config_nodes_${node}_mac_address
- mac_addr=$(eval echo \$$mac_string)
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- if [ $mac_addr == "" ]; then
- echo "${red} Unable to find mac_address for $node! ${reset}"
- exit 1
- fi
- ;;
- 1)
- if [ "$node_type" == "controller" ]; then
- mac_string=config_nodes_${node}_private_mac
- mac_addr=$(eval echo \$$mac_string)
- if [ $mac_addr == "" ]; then
- echo "${red} Unable to find private_mac for $node! ${reset}"
- exit 1
- fi
- else
- ##generate random mac
- mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
- fi
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- ;;
- *)
- mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- ;;
- esac
- sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
- ((if_counter++))
- done
-
- ##now remove interface config in Vagrantfile for 1 node
- ##if 1, 3, or 4 interfaces set deployment type
- ##if 2 interfaces remove 2nd interface and set deployment type
- if [ "$if_counter" == 1 ]; then
- deployment_type="single_network"
- remove_vagrant_network eth_replace1
- remove_vagrant_network eth_replace2
- remove_vagrant_network eth_replace3
- elif [ "$if_counter" == 2 ]; then
- deployment_type="single_network"
- second_interface=`echo $output | awk '{print $2}'`
- remove_vagrant_network $second_interface
- remove_vagrant_network eth_replace2
- elif [ "$if_counter" == 3 ]; then
- deployment_type="three_network"
- remove_vagrant_network eth_replace3
else
- deployment_type="multi_network"
- fi
- ##modify provisioning to do puppet install, config, and foreman check-in
- ##substitute host_name and dns_server in the provisioning script
- host_string=config_nodes_${node}_hostname
- host_name=$(eval echo \$$host_string)
- sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
- ##dns server should be the foreman server
- sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+ echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}"
+ ##Edit the ksgen settings appropriately
+ ##ksgen settings will be stored in /vagrant on the vagrant machine
+ ##if single node deployment all the variables will have the same ip
+ ##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7
+
+ sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
+
+ ##replace private interface parameter
+ ##private interface will be of hosts, so we need to know the provisioned host interface name
+ ##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts
+ ##replace IP for parameters with next IP that will be given to controller
+ if [ "$deployment_type" == "single_network" ]; then
+ ##we also need to assign IP addresses to nodes
+ ##for single node, foreman is managing the single network, so we can't reserve them
+ ##not supporting single network anymore for now
+ echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. ${reset}"
+ exit 0
+
+ elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then
+
+ if [ "$deployment_type" == "three_network" ]; then
+ sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml
+ fi
+
+ sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml
+
+ ##get ip addresses for private network on controllers to make dhcp entries
+ ##required for controllers_ip_array global param
+ next_private_ip=${interface_ip_arr[1]}
+ type=_private
+ control_count=0
+ for node in controller1 controller2 controller3; do
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
+ exit 1
+ fi
+ sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ controller_ip_array=$controller_ip_array$next_private_ip,
+ controllers_ip_arr[$control_count]=$next_private_ip
+ ((control_count++))
+ done
+
+ next_public_ip=${interface_ip_arr[2]}
+ foreman_ip=$next_public_ip
+
+ ##if no dhcp, find all the Admin IPs for nodes in advance
+ if [ $virtual ]; then
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*no_dhcp:.*$/no_dhcp: true/' opnfv_ksgen_settings.yml
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ next_admin_ip=${interface_ip_arr[0]}
+ type=_admin
+ for node in ${nodes}; do
+ next_admin_ip=$(next_ip $next_admin_ip)
+ if [ ! "$next_admin_ip" ]; then
+ echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}"
+ exit 1
+ else
+ admin_ip_arr[$node]=$next_admin_ip
+ sed -i 's/'"$node$type"'/'"$next_admin_ip"'/g' opnfv_ksgen_settings.yml
+ fi
+ done
+
+ ##allocate node public IPs
+ for node in ${nodes}; do
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ if [ ! "$next_public_ip" ]; then
+ echo "${red} Unable to find an unused IP in public_network for $node ! ${reset}"
+ exit 1
+ else
+ public_ip_arr[$node]=$next_public_ip
+ fi
+ done
+ fi
+ fi
+ ##replace global param for controllers_ip_array
+ controller_ip_array=${controller_ip_array%?}
+ sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
+
+ ##now replace all the VIP variables. admin//private can be the same IP
+ ##we have to use IP's here that won't be allocated to hosts at provisioning time
+ ##therefore we increment the ip by 10 to make sure we have a safe buffer
+ next_private_ip=$(increment_ip $next_private_ip 10)
+
+ private_output=$(grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml)
+ if [ ! -z "$private_output" ]; then
+ while read -r line; do
+ sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
+ exit 1
+ fi
+ done <<< "$private_output"
+ fi
+
+ ##replace odl_control_ip (non-HA only)
+ odl_control_ip=${controllers_ip_arr[0]}
+ sed -i 's/^.*odl_control_ip:.*$/ odl_control_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
+
+ ##replace controller_ip (non-HA only)
+ sed -i 's/^.*controller_ip:.*$/ controller_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
+
+ ##replace foreman site
+ sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$foreman_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
+ ##replace public vips
+ ##no need to do this if no dhcp
+ if [[ -z "$enable_virtual_dhcp" && ! -z "$virtual" ]]; then
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ else
+ next_public_ip=$(increment_ip $next_public_ip 10)
+ fi
+
+ public_output=$(grep -E '*public_vip' opnfv_ksgen_settings.yml)
+ if [ ! -z "$public_output" ]; then
+ while read -r line; do
+ if echo $line | grep horizon_public_vip; then
+ horizon_public_vip=$next_public_ip
+ fi
+ sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ if [ ! "$next_public_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2
+ exit 1
+ fi
+ done <<< "$public_output"
+ fi
+
+ ##replace public_network param
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ ##replace private_network param
+ private_subnet=$(find_subnet $next_private_ip $private_subnet_mask)
+ sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ ##replace storage_network
+ if [ "$deployment_type" == "three_network" ]; then
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ next_storage_ip=${interface_ip_arr[3]}
+ storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask)
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace public_subnet param
+ public_subnet=$public_subnet'\'$public_short_subnet_mask
+ sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ ##replace private_subnet param
+ private_subnet=$private_subnet'\'$private_short_subnet_mask
+ sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+
+ ##replace public_dns param to be foreman server
+ sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml
+
+ ##replace public_gateway
+ if [ -z "$public_gateway" ]; then
+ ##if unset then we assume its the first IP in the public subnet
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ public_gateway=$(increment_subnet $public_subnet 1)
+ fi
+ sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml
+
+ ##we have to define an allocation range of the public subnet to give
+ ##to neutron to use as floating IPs
+ ##if static ip range, then we take the difference of the end range and current ip
+ ## to be the allocation pool
+ ##if not static ip, we will use the last 20 IP from the subnet
+ ## note that this is not a really good idea because the subnet must be at least a /27 for this to work...
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ if [ ! -z "$static_ip_range" ]; then
+ begin_octet=$(echo $next_public_ip | cut -d . -f4)
+ end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+ ip_diff=$((end_octet-begin_octet))
+ if [ $ip_diff -le 0 ]; then
+ echo "${red}ip range left for floating range is less than or equal to 0! $ip_diff ${reset}"
+ exit 1
+ else
+ public_allocation_start=$(next_ip $next_public_ip)
+ public_allocation_end=$static_ip_range_end
+ fi
+ else
+ last_ip_subnet=$(find_last_ip_subnet $next_public_ip $public_subnet_mask)
+ public_allocation_start=$(subtract_ip $last_ip_subnet $floating_ip_count )
+ public_allocation_end=${last_ip_subnet}
+ fi
+ echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}"
+
+ sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
+ sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
+
+ else
+ printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2
+ exit 1
+ fi
- ## remove bootstrap and NAT provisioning
- sed -i '/nat_setup.sh/d' Vagrantfile
- sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+ echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}"
- ## modify default_gw to be node_default_gw
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+ fi
+}
- ## modify VM memory to be 4gig
- sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile
+##Configure bootstrap.sh to use the virtual Khaleesi playbook
+##params: none
+##usage: configure_virtual()
+configure_virtual() {
+ if [ $virtual ]; then
+ echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
+ sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
+ fi
+}
- echo "${blue}Starting Vagrant Node $node! ${reset}"
+##Starts Foreman VM with Vagrant
+##params: none
+##usage: start_vagrant()
+start_foreman() {
+ echo "${blue}Starting Vagrant! ${reset}"
##stand up vagrant
if ! vagrant up; then
- echo "${red} Unable to start $node ${reset}"
+ printf '%s\n' 'deploy.sh: Unable to complete Foreman VM install' >&2
exit 1
else
- echo "${blue} $node VM is up! ${reset}"
+ echo "${blue}Foreman VM is up! ${reset}"
+ fi
+}
+
+##start the VM if this is a virtual installation
+##this function does nothing if baremetal servers are being used
+##params: none
+##usage: start_virtual_nodes()
+start_virtual_nodes() {
+ if [ $virtual ]; then
+
+ ##Bring up VM nodes
+ echo "${blue}Setting VMs up... ${reset}"
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ ##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first
+ ##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have
+ ##3 static controllers
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ controller_count=0
+ compute_wait_completed=false
+
+ for node in ${nodes}; do
+ cd /tmp/
+
+ ##remove VM nodes incase it wasn't cleaned up
+ rm -rf $vm_dir/$node
+ rm -rf /tmp/genesis/
+
+ ##clone genesis and move into node folder
+ if ! GIT_SSL_NO_VERIFY=true git clone https://gerrit.opnfv.org/gerrit/genesis.git; then
+ printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
+ exit 1
+ fi
+
+ mv -f /tmp/genesis/foreman/ci $vm_dir/$node
+ rm -rf /tmp/genesis/
+
+ cd $vm_dir/$node
+
+ if [ $base_config ]; then
+ if ! cp -f $base_config opnfv_ksgen_settings.yml; then
+ echo "${red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+ exit 1
+ fi
+ fi
+
+ ##parse yaml into variables
+ eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
+ ##find node type
+ node_type=config_nodes_${node}_type
+ node_type=$(eval echo \$$node_type)
+
+ ##trozet test make compute nodes wait 20 minutes
+ if [ "$compute_wait_completed" = false ] && [ "$node_type" != "controller" ]; then
+ echo "${blue}Waiting 20 minutes for Control nodes to install before continuing with Compute nodes..."
+ compute_wait_completed=true
+ sleep 1400
+ fi
+
+ ##find number of interfaces with ip and substitute in VagrantFile
+ output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
+
+ if [ ! "$output" ]; then
+ printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
+ exit 1
+ fi
+
+ if_counter=0
+ for interface in ${output}; do
+
+ if [ -z "$enable_virtual_dhcp" ]; then
+ if [ "$if_counter" -ge 1 ]; then
+ break
+ fi
+ elif [ "$if_counter" -ge 4 ]; then
+ break
+ fi
+ interface_ip=$(find_ip $interface)
+ if [ ! "$interface_ip" ]; then
+ continue
+ fi
+ case "${if_counter}" in
+ 0)
+ mac_string=config_nodes_${node}_mac_address
+ mac_addr=$(eval echo \$$mac_string)
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ if [ $mac_addr == "" ]; then
+ echo "${red} Unable to find mac_address for $node! ${reset}"
+ exit 1
+ fi
+ ;;
+ 1)
+ if [ "$node_type" == "controller" ]; then
+ mac_string=config_nodes_${node}_private_mac
+ mac_addr=$(eval echo \$$mac_string)
+ if [ $mac_addr == "" ]; then
+ echo "${red} Unable to find private_mac for $node! ${reset}"
+ exit 1
+ fi
+ else
+ ##generate random mac
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ fi
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ ;;
+ *)
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ ;;
+ esac
+ this_admin_ip=${admin_ip_arr[$node]}
+ sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile
+ ((if_counter++))
+ done
+    ##set deployment type and remove unused interface config from the Vagrantfile
+    ##1 or 2 detected interfaces -> single_network, 3 -> three_network,
+    ##4 -> multi_network
+ if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then
+ deployment_type="single_network"
+ if [ "$node_type" == "controller" ]; then
+ mac_string=config_nodes_${node}_private_mac
+ mac_addr=$(eval echo \$$mac_string)
+          if [ -z "$mac_addr" ]; then
+ echo "${red} Unable to find private_mac for $node! ${reset}"
+ exit 1
+ fi
+ else
+ ##generate random mac
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ fi
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ if [ "$node_type" == "controller" ]; then
+ new_node_ip=${controllers_ip_arr[$controller_count]}
+ if [ ! "$new_node_ip" ]; then
+            echo "${red}ERROR: Empty node ip for controller $controller_count ${reset}"
+ exit 1
+ fi
+ ((controller_count++))
+ else
+ next_private_ip=$(next_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+            echo "${red}ERROR: Could not find private ip for $node ${reset}"
+ exit 1
+ fi
+ new_node_ip=$next_private_ip
+ fi
+ sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ ##replace host_ip in vm_nodes_provision with private ip
+ sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh
+ ##replace ping site
+ if [ ! -z "$ping_site" ]; then
+ sed -i 's/www.google.com/'$ping_site'/' vm_nodes_provision.sh
+ fi
+ ##find public ip info
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ this_public_ip=${public_ip_arr[$node]}
+
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile
+ else
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
+ fi
+ remove_vagrant_network eth_replace3
+ elif [ "$if_counter" == 3 ]; then
+ deployment_type="three_network"
+ remove_vagrant_network eth_replace3
+ else
+ deployment_type="multi_network"
+ fi
+ ##modify provisioning to do puppet install, config, and foreman check-in
+ ##substitute host_name and dns_server in the provisioning script
+ host_string=config_nodes_${node}_hostname
+ host_name=$(eval echo \$$host_string)
+ sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
+ ##dns server should be the foreman server
+ sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+ ## remove bootstrap and NAT provisioning
+ sed -i '/nat_setup.sh/d' Vagrantfile
+ sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+ ## modify default_gw to be node_default_gw
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+      ##bump VM memory to 4 GB for controller nodes
+      ##(compute nodes keep the Vagrantfile default)
+ if [ "$node_type" == "controller" ]; then
+ sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile
+ fi
+ echo "${blue}Starting Vagrant Node $node! ${reset}"
+ ##stand up vagrant
+ if ! vagrant up; then
+ echo "${red} Unable to start $node ${reset}"
+ exit 1
+ else
+ echo "${blue} $node VM is up! ${reset}"
+ fi
+ done
+ echo "${blue} All VMs are UP! ${reset}"
+ echo "${blue} Waiting for puppet to complete on the nodes... ${reset}"
+ ##check puppet is complete
+ ##ssh into foreman server, run check to verify puppet is complete
+ pushd $vm_dir/foreman_vm
+ if ! vagrant ssh -c "/opt/khaleesi/run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml /opt/khaleesi/playbooks/validate_opnfv-vm.yml"; then
+ echo "${red} Failed to validate puppet completion on nodes ${reset}"
+ exit 1
+ else
+    echo "${blue} Puppet complete on all nodes! ${reset}"
+ fi
+ popd
+ ##add routes back to nodes
+ for node in ${nodes}; do
+ pushd $vm_dir/$node
+ if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then
+ echo "${blue} Adding public route back to $node! ${reset}"
+ vagrant ssh -c "route add default gw $this_default_gw"
+ fi
+ popd
+ done
+ if [ ! -z "$horizon_public_vip" ]; then
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_public_vip} ${reset}"
+ else
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${odl_control_ip} ${reset}"
+ fi
fi
+}
-done
+##END FUNCTIONS
- echo "${blue} All VMs are UP! ${reset}"
+main() {
+ parse_cmdline "$@"
+ disable_selinux
+ install_EPEL
+ install_vbox
+ install_ansible
+ install_vagrant
+ clean_tmp
+ verify_vm_dir
+ clone_bgs
+ configure_network
+ configure_virtual
+ start_foreman
+ start_virtual_nodes
+}
-fi
+main "$@"
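The node loop above relies on a parse_yaml helper plus indirect variable expansion to read per-node fields out of the settings YAML. A minimal sketch of that lookup pattern only (illustrative, not part of the patch; the flattened variable is hard-coded to stand in for parse_yaml output):

#!/bin/bash
# Stand-in for what 'eval $(parse_yaml opnfv_ksgen_settings.yml "config_")'
# is assumed to produce for the "compute" node entry.
config_nodes_compute_type="compute"

node="compute"
node_type=config_nodes_${node}_type          # build the variable name
node_type=$(eval echo \$$node_type)          # dereference it indirectly
echo "node ${node} has type ${node_type}"    # -> node compute has type compute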
diff --git a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
index 72935c9..2c146a0 100644
--- a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
+++ b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
@@ -105,9 +105,9 @@ nodes:
type: compute
host_type: baremetal
hostgroup: Compute
- mac_address: "00:25:b5:a0:00:5e"
- bmc_ip: 172.30.8.74
- bmc_mac: "74:a2:e6:a4:14:9c"
+ mac_address: "00:25:B5:A0:00:2A"
+ bmc_ip: 172.30.8.75
+ bmc_mac: "a8:9d:21:c9:8b:56"
bmc_user: admin
bmc_pass: octopus
ansible_ssh_pass: "Op3nStack"
@@ -125,9 +125,9 @@ nodes:
type: compute
host_type: baremetal
hostgroup: Compute
- mac_address: "00:25:b5:a0:00:3e"
- bmc_ip: 172.30.8.73
- bmc_mac: "a8:9d:21:a0:15:9c"
+ mac_address: "00:25:B5:A0:00:3A"
+ bmc_ip: 172.30.8.65
+ bmc_mac: "a8:9d:21:c9:4d:26"
bmc_user: admin
bmc_pass: octopus
ansible_ssh_pass: "Op3nStack"
@@ -145,13 +145,13 @@ nodes:
type: controller
host_type: baremetal
hostgroup: Controller_Network_ODL
- mac_address: "00:25:b5:a0:00:af"
- bmc_ip: 172.30.8.66
- bmc_mac: "a8:9d:21:c9:8b:56"
+ mac_address: "00:25:B5:A0:00:4A"
+ bmc_ip: 172.30.8.74
+ bmc_mac: "a8:9d:21:c9:3a:92"
bmc_user: admin
bmc_pass: octopus
private_ip: controller1_private
- private_mac: "00:25:b5:b0:00:1f"
+ private_mac: "00:25:B5:A0:00:4B"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
groups:
@@ -167,13 +167,13 @@ nodes:
type: controller
host_type: baremetal
hostgroup: Controller_Network
- mac_address: "00:25:b5:a0:00:9e"
- bmc_ip: 172.30.8.75
- bmc_mac: "a8:9d:21:c9:4d:26"
+ mac_address: "00:25:B5:A0:00:5A"
+ bmc_ip: 172.30.8.73
+ bmc_mac: "74:a2:e6:a4:14:9c"
bmc_user: admin
bmc_pass: octopus
private_ip: controller2_private
- private_mac: "00:25:b5:b0:00:de"
+ private_mac: "00:25:B5:A0:00:5B"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
groups:
@@ -189,13 +189,13 @@ nodes:
type: controller
host_type: baremetal
hostgroup: Controller_Network
- mac_address: "00:25:b5:a0:00:7e"
- bmc_ip: 172.30.8.65
- bmc_mac: "a8:9d:21:c9:3a:92"
+ mac_address: "00:25:B5:A0:00:6A"
+ bmc_ip: 172.30.8.72
+ bmc_mac: "a8:9d:21:a0:15:9c"
bmc_user: admin
bmc_pass: octopus
private_ip: controller3_private
- private_mac: "00:25:b5:b0:00:be"
+ private_mac: "00:25:B5:A0:00:6B"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
groups:
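Before re-running a baremetal deploy against the re-mapped MAC and BMC addresses above, it can be worth confirming that each BMC answers with the listed credentials. A hedged example using ipmitool (IP, user and password taken from the settings above; assumes ipmitool is installed on the jumphost):

# Query chassis power state over IPMI for one of the controllers listed above.
ipmitool -I lanplus -H 172.30.8.74 -U admin -P octopus chassis status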
diff --git a/foreman/ci/opnfv_ksgen_settings.yml b/foreman/ci/opnfv_ksgen_settings.yml
index 21840dd..b41a41b 100644
--- a/foreman/ci/opnfv_ksgen_settings.yml
+++ b/foreman/ci/opnfv_ksgen_settings.yml
@@ -44,6 +44,7 @@ global_params:
deployment_type:
network_type: multi_network
default_gw:
+no_dhcp: false
foreman:
seed_values:
- { name: heat_cfn, oldvalue: true, newvalue: false }
@@ -110,6 +111,7 @@ nodes:
bmc_mac: "10:23:45:67:88:AB"
bmc_user: root
bmc_pass: root
+ admin_ip: compute_admin
ansible_ssh_pass: "Op3nStack"
admin_password: ""
groups:
@@ -130,6 +132,7 @@ nodes:
bmc_mac: "10:23:45:67:88:AC"
bmc_user: root
bmc_pass: root
+ admin_ip: controller1_admin
private_ip: controller1_private
private_mac: "10:23:45:67:87:AC"
ansible_ssh_pass: "Op3nStack"
@@ -152,6 +155,7 @@ nodes:
bmc_mac: "10:23:45:67:88:AD"
bmc_user: root
bmc_pass: root
+ admin_ip: controller2_admin
private_ip: controller2_private
private_mac: "10:23:45:67:87:AD"
ansible_ssh_pass: "Op3nStack"
@@ -174,6 +178,7 @@ nodes:
bmc_mac: "10:23:45:67:88:AE"
bmc_user: root
bmc_pass: root
+ admin_ip: controller3_admin
private_ip: controller3_private
private_mac: "10:23:45:67:87:AE"
ansible_ssh_pass: "Op3nStack"
diff --git a/foreman/ci/opnfv_ksgen_settings_no_HA.yml b/foreman/ci/opnfv_ksgen_settings_no_HA.yml
new file mode 100644
index 0000000..79db257
--- /dev/null
+++ b/foreman/ci/opnfv_ksgen_settings_no_HA.yml
@@ -0,0 +1,264 @@
+global_params:
+ admin_email: opnfv@opnfv.com
+ ha_flag: "false"
+ odl_flag: "true"
+ odl_control_ip:
+ private_network:
+ storage_network:
+ public_network:
+ private_subnet:
+ deployment_type:
+ controller_ip:
+network_type: multi_network
+default_gw:
+no_dhcp: false
+foreman:
+ seed_values:
+ - { name: heat_cfn, oldvalue: true, newvalue: false }
+workaround_puppet_version_lock: false
+opm_branch: master
+installer:
+ name: puppet
+ short_name: pupt
+ network:
+ auto_assign_floating_ip: false
+ variant:
+ short_name: m2vx
+ plugin:
+ name: neutron
+workaround_openstack_packstack_rpm: false
+tempest:
+ repo:
+ Fedora:
+ '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/
+ '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/
+ RedHat:
+ '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/
+ use_virtual_env: false
+ public_allocation_end: 10.2.84.71
+ skip:
+ files: null
+ tests: null
+ public_allocation_start: 10.2.84.51
+ physnet: physnet1
+ use_custom_repo: false
+ public_subnet_cidr: 10.2.84.0/24
+ public_subnet_gateway: 10.2.84.1
+ additional_default_settings:
+ - section: compute
+ option: flavor_ref
+ value: 1
+ cirros_image_file: cirros-0.3.1-x86_64-disk.img
+ setup_method: tempest/rpm
+ test_name: all
+ rdo:
+ version: juno
+ rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ rpm:
+ version: 20141201
+ dir: ~{{ nodes.tempest.remote_user }}/tempest-dir
+tmp:
+ node_prefix: '{{ node.prefix | reject("none") | join("-") }}-'
+ anchors:
+ - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ - http://repos.fedorapeople.org/repos/openstack/openstack-juno/
+opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git
+workaround_vif_plugging: false
+openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
+nodes:
+ compute:
+ name: oscompute11.opnfv.com
+ hostname: oscompute11.opnfv.com
+ short_name: oscompute11
+ type: compute
+ host_type: baremetal
+ hostgroup: Compute
+ mac_address: "10:23:45:67:89:AB"
+ bmc_ip: 10.4.17.2
+ bmc_mac: "10:23:45:67:88:AB"
+ bmc_user: root
+ bmc_pass: root
+ admin_ip: compute_admin
+ ansible_ssh_pass: "Op3nStack"
+ admin_password: ""
+ groups:
+ - compute
+ - foreman_nodes
+ - puppet
+ - rdo
+ - neutron
+ controller1:
+ name: oscontroller1.opnfv.com
+ hostname: oscontroller1.opnfv.com
+ short_name: oscontroller1
+ type: controller
+ host_type: baremetal
+ hostgroup: Controller_Network_ODL
+ mac_address: "10:23:45:67:89:AC"
+ bmc_ip: 10.4.17.3
+ bmc_mac: "10:23:45:67:88:AC"
+ bmc_user: root
+ bmc_pass: root
+ private_ip: controller1_private
+ admin_ip: controller1_admin
+ private_mac: "10:23:45:67:87:AC"
+ ansible_ssh_pass: "Op3nStack"
+ admin_password: "octopus"
+ groups:
+ - controller
+ - foreman_nodes
+ - puppet
+ - rdo
+ - neutron
+workaround_mysql_centos7: true
+distro:
+ name: centos
+ centos:
+ '7.0':
+ repos: []
+ short_name: c
+ short_version: 70
+ version: '7.0'
+ rhel:
+ '7.0':
+ kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/
+ repos:
+ - section: rhel7-server-rpms
+ name: Packages for RHEL 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/
+ gpgcheck: 0
+ - section: rhel-7-server-update-rpms
+ name: Update Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/
+ gpgcheck: 0
+ - section: rhel-7-server-optional-rpms
+ name: Optional Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/
+ gpgcheck: 0
+ - section: rhel-7-server-extras-rpms
+        name: Extras Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/
+ gpgcheck: 0
+ '6.5':
+ kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/
+ repos:
+ - section: rhel6.5-server-rpms
+ name: Packages for RHEL 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server
+ gpgcheck: 0
+ - section: rhel-6.5-server-update-rpms
+ name: Update Packages for Enterprise Linux 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/
+ gpgcheck: 0
+ - section: rhel-6.5-server-optional-rpms
+ name: Optional Packages for Enterprise Linux 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os
+ gpgcheck: 0
+ - section: rhel6.5-server-rpms-32bit
+ name: Packages for RHEL 6.5 - i386
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server
+ gpgcheck: 0
+ enabled: 1
+ - section: rhel-6.5-server-update-rpms-32bit
+ name: Update Packages for Enterprise Linux 6.5 - i686
+ baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/
+ gpgcheck: 0
+ enabled: 1
+ - section: rhel-6.5-server-optional-rpms-32bit
+ name: Optional Packages for Enterprise Linux 6.5 - i386
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os
+ gpgcheck: 0
+ enabled: 1
+ subscription:
+ username: REPLACE_ME
+ password: HWj8TE28Qi0eP2c
+ pool: 8a85f9823e3d5e43013e3ddd4e2a0977
+ config:
+ selinux: permissive
+ ntp_server: 0.pool.ntp.org
+ dns_servers:
+ - 10.4.1.1
+ - 10.4.0.2
+ reboot_delay: 1
+ initial_boot_timeout: 180
+node:
+ prefix:
+ - rdo
+ - pupt
+ - ffqiotcxz1
+ - null
+product:
+ repo_type: production
+ name: rdo
+ short_name: rdo
+ rpm:
+ CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ short_version: ju
+ repo:
+ production:
+ CentOS:
+ 7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ Fedora:
+ '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20
+ '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21
+ RedHat:
+ '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ version: juno
+ config:
+ enable_epel: y
+ short_repo: prod
+tester:
+ name: tempest
+distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' '
+job:
+ verbosity: 1
+ archive:
+ - '{{ tempest.dir }}/etc/tempest.conf'
+ - '{{ tempest.dir }}/etc/tempest.conf.sample'
+ - '{{ tempest.dir }}/*.log'
+ - '{{ tempest.dir }}/*.xml'
+ - /root/
+ - /var/log/
+ - /etc/nova
+ - /etc/ceilometer
+ - /etc/cinder
+ - /etc/glance
+ - /etc/keystone
+ - /etc/neutron
+ - /etc/ntp
+ - /etc/puppet
+ - /etc/qpid
+ - /etc/qpidd.conf
+ - /root
+ - /etc/yum.repos.d
+ - /etc/yum.repos.d
+topology:
+ name: multinode
+ short_name: mt
+workaround_neutron_ovs_udev_loop: true
+workaround_glance_table_utf8: false
+verbosity:
+ debug: 0
+ info: 1
+ warning: 2
+ warn: 2
+ errors: 3
+provisioner:
+ username: admin
+ network:
+ type: nova
+ name: external
+ skip: skip_provision
+ foreman_url: https://10.2.84.2/api/v2/
+ password: octopus
+ type: foreman
+workaround_nova_compute_fix: false
+workarounds:
+ enabled: true
+
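Since this new settings file is meant to drive a non-HA deployment, a quick grep-based sanity check (illustrative only; the checkout-relative path is an assumption) can confirm that ha_flag is false and that exactly one controller entry is defined before passing the file to deploy.sh as the base config:

settings=foreman/ci/opnfv_ksgen_settings_no_HA.yml   # assumed path within the genesis checkout

grep -q 'ha_flag: "false"' "$settings" || echo "warning: ha_flag is not false"
echo "controller entries: $(grep -c '^  controller[0-9]*:' "$settings")"   # expect 1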
diff --git a/foreman/ci/reload_playbook.yml b/foreman/ci/reload_playbook.yml
index 9e3d053..9b3a4d4 100644
--- a/foreman/ci/reload_playbook.yml
+++ b/foreman/ci/reload_playbook.yml
@@ -14,3 +14,4 @@
delay=60
timeout=180
sudo: false
+ - pause: minutes=1
diff --git a/foreman/ci/vm_nodes_provision.sh b/foreman/ci/vm_nodes_provision.sh
index d0bba64..e64c0ad 100755
--- a/foreman/ci/vm_nodes_provision.sh
+++ b/foreman/ci/vm_nodes_provision.sh
@@ -18,6 +18,7 @@ green=`tput setaf 2`
host_name=REPLACE
dns_server=REPLACE
+host_ip=REPLACE
##END VARS
##set hostname
@@ -31,27 +32,52 @@ if ! grep 'PEERDNS=no' /etc/sysconfig/network-scripts/ifcfg-enp0s3; then
systemctl restart NetworkManager
fi
-if ! ping www.google.com -c 5; then
+##modify /etc/resolv.conf to point to foreman
+echo "${blue} Configuring resolv.conf with DNS: $dns_server ${reset}"
+cat > /etc/resolv.conf << EOF
+search ci.com opnfv.com
+nameserver $dns_server
+nameserver 8.8.8.8
+
+EOF
+
+##modify /etc/hosts to add own IP for rabbitmq workaround
+host_short_name=`echo $host_name | cut -d . -f 1`
+echo "${blue} Configuring hosts with: $host_name $host_ip ${reset}"
+cat > /etc/hosts << EOF
+$host_ip $host_short_name $host_name
+127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
+EOF
+
+if ! ping www.google.com -c 5; then
echo "${red} No internet connection, check your route and DNS setup ${reset}"
exit 1
fi
-# Install EPEL repo for access to many other yum repos
-# Major version is pinned to force some consistency for Arno
-yum install -y epel-release-7*
+##install EPEL
+if ! yum repolist | grep "epel/"; then
+ if ! rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm; then
+    printf '%s\n' 'vm_nodes_provision.sh: Unable to configure EPEL repo' >&2
+ exit 1
+ fi
+else
+ printf '%s\n' 'vm_nodes_provision.sh: Skipping EPEL repo as it is already configured.'
+fi
-# Update device-mapper-libs, needed for libvirtd on compute nodes
-# Major version is pinned to force some consistency for Arno
-if ! yum -y upgrade device-mapper-libs-1*; then
+##install device-mapper-libs
+##needed for libvirtd on compute nodes
+if ! yum -y upgrade device-mapper-libs; then
echo "${red} WARN: Unable to upgrade device-mapper-libs...nova-compute may not function ${reset}"
fi
-# Install other required packages
-# Major version is pinned to force some consistency for Arno
echo "${blue} Installing Puppet ${reset}"
-if ! yum install -y puppet-3*; then
- printf '%s\n' 'vm_nodes_provision.sh: failed to install required packages' >&2
- exit 1
+##install puppet
+if ! yum list installed | grep -i puppet; then
+ if ! yum -y install puppet; then
+ printf '%s\n' 'vm_nodes_provision.sh: Unable to install puppet package' >&2
+ exit 1
+ fi
fi
echo "${blue} Configuring puppet ${reset}"
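After vm_nodes_provision.sh has run on a node, the DNS, hosts-file and puppet steps above can be spot-checked with a few standard commands (illustrative only; run on the provisioned node itself):

grep '^nameserver' /etc/resolv.conf        # first nameserver should be the Foreman DNS IP set above
grep "$(hostname -f)" /etc/hosts           # rabbitmq workaround entry with the node's own IP
rpm -q puppet                              # provisioning installs puppet when it is missing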
diff --git a/foreman/docs/src/installation-instructions.rst b/foreman/docs/src/installation-instructions.rst
index 2ac872d..20ea983 100644
--- a/foreman/docs/src/installation-instructions.rst
+++ b/foreman/docs/src/installation-instructions.rst
@@ -213,8 +213,6 @@ Follow the steps below to execute:
4. Your nodes will take 40-60 minutes to re-install CentOS 7 and install/configure OPNFV. When complete you will see "Finished: SUCCESS"
-.. _setup_verify:
-
Verifying the Setup
-------------------
@@ -236,9 +234,7 @@ Now that the installer has finished it is a good idea to check and make sure thi
7. Now go to your web browser and insert the Horizon public VIP. The login will be "admin"/"octopus".
-8. You are now able to follow the `OpenStack Verification <openstack_verify_>`_ section.
-
-.. _openstack_verify:
+8. You are now able to follow the `OpenStack Verification`_ section.
OpenStack Verification
----------------------
@@ -316,14 +312,14 @@ Follow the steps below to execute:
Verifying the Setup - VMs
-------------------------
-Follow the instructions in the `Verifying the Setup <setup_verify_>`_ section.
+Follow the instructions in the `Verifying the Setup`_ section.
Also, for VM deployment you are able to easily access your nodes by going to ``/tmp/<node name>`` and then ``vagrant ssh`` (password is "vagrant"). You can use this to go to a controller and check OpenStack services, OpenDaylight, etc.
OpenStack Verification - VMs
----------------------------
-Follow the steps in `OpenStack Verification <openstack_verify_>`_ section.
+Follow the steps in the `OpenStack Verification`_ section.
Frequently Asked Questions
==========================
@@ -353,7 +349,11 @@ OpenStack
OpenDaylight
------------
-`OpenDaylight artifacts <http://www.opendaylight.org/software/downloads>`_
+Upstream OpenDaylight provides `a number of packaging and deployment options <https://wiki.opendaylight.org/view/Deployment>`_ meant for consumption by downstream projects like OPNFV.
+
+Currently, OPNFV Foreman uses `OpenDaylight's Puppet module <https://github.com/dfarrell07/puppet-opendaylight>`_, which in turn depends on `OpenDaylight's RPM <https://copr.fedoraproject.org/coprs/dfarrell07/OpenDaylight/>`_.
+
+Note that the RPM is currently hosted on Copr, but `will soon <https://trello.com/c/qseotfgL/171-host-odl-rpm-on-odl-infra>`_ be migrated to OpenDaylight's infrastructure and/or the new CentOS NFV SIG.
Foreman
-------
diff --git a/fuel/deploy/libvirt/vms/compute.xml b/fuel/deploy/libvirt/vms/compute.xml
index 2ea35ac..86a7613 100644
--- a/fuel/deploy/libvirt/vms/compute.xml
+++ b/fuel/deploy/libvirt/vms/compute.xml
@@ -8,6 +8,7 @@
<boot dev='network'/>
<boot dev='hd'/>
<bootmenu enable='yes'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
diff --git a/fuel/deploy/libvirt/vms/controller.xml b/fuel/deploy/libvirt/vms/controller.xml
index 4377879..9e49b0f 100644
--- a/fuel/deploy/libvirt/vms/controller.xml
+++ b/fuel/deploy/libvirt/vms/controller.xml
@@ -7,6 +7,7 @@
<type arch='x86_64' machine='pc-1.0'>hvm</type>
<boot dev='network'/>
<boot dev='hd'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
diff --git a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4 b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4
index 099c21e..ad5d4d1 100644
--- a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4
+++ b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4
@@ -9,6 +9,7 @@
<boot dev='network'/>
<boot dev='hd'/>
<bootmenu enable='yes'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
diff --git a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5 b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5
index 76569e0..3905906 100644
--- a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5
+++ b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5
@@ -8,6 +8,7 @@
<type arch='x86_64' machine='pc-1.0'>hvm</type>
<boot dev='network'/>
<boot dev='hd'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
diff --git a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1 b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1
index 715d4c4..ca1bd3b 100644
--- a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1
+++ b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1
@@ -8,6 +8,7 @@
<type arch='x86_64' machine='pc-1.0'>hvm</type>
<boot dev='network'/>
<boot dev='hd'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
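The rebootTimeout BIOS element added to these libvirt domain definitions makes the guest retry booting after 30 seconds (30000 ms) instead of halting when a PXE boot attempt fails. Once a domain is defined, the setting can be confirmed with virsh (domain name is an example):

sudo virsh dumpxml controller1 | grep rebootTimeout    # expect rebootTimeout='30000'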